initial project commit

This commit is contained in:
2024-02-05 16:28:05 -05:00
commit 5861055c15
3299 changed files with 458518 additions and 0 deletions

View File

@ -0,0 +1,5 @@
---
exclude_paths:
- .github
- changelogs
...

View File

@ -0,0 +1,2 @@
[pycodestyle]
max-line-length=120

View File

@ -0,0 +1,78 @@
[MESSAGES CONTROL]
disable=
# "F" Fatal errors that prevent further processing
# import-error,
# "I" Informational noise
# "E" Error for important programming issues (likely bugs)
# no-member,
# no-name-in-module,
# raising-bad-type,
# "W" Warnings for stylistic problems or minor programming issues
# no-absolute-import,
# arguments-differ,
# cell-var-from-loop,
# fixme,
; lost-exception,
; no-init,
; pointless-string-statement,
; protected-access,
; redefined-outer-name,
; relative-import,
; undefined-loop-variable,
; unsubscriptable-object,
# unused-argument,
; unused-import,
; unspecified-encoding,
# "C" Coding convention violations
; bad-continuation,
; missing-docstring,
; wrong-import-order,
; use-maxsplit-arg,
; consider-using-dict-items,
; consider-using-enumerate,
# "R" Refactor recommendations
; duplicate-code,
; no-self-use,
; too-few-public-methods,
; too-many-branches,
; too-many-locals,
; too-many-statements,
; consider-using-from-import,
; use-list-literal,
; use-dict-literal,
# new for python3 version of pylint
; useless-object-inheritance,
; consider-using-set-comprehension, # pylint3 force to use comprehension in place we don't want (py2 doesn't have these options, for inline skip)
; unnecessary-pass,
; invalid-envvar-default, # pylint3 warnings envvar returns str/none by default
; bad-option-value, # python 2 doesn't have import-outside-toplevel, but in some case we need to import outside toplevel
; super-with-arguments, # required in python 2
; raise-missing-from, # no 'raise from' in python 2
; use-a-generator, # cannot be modified because of Python2 support
; consider-using-with, # on bunch spaces we cannot change that...
; duplicate-string-formatting-argument, # TMP: will be fixed in close future
consider-using-f-string, # sorry, not gonna happen, still have to support py2
; use-dict-literal
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
[DESIGN]
max-args=11 # 2x + 1 from default
max-attributes=21 # 4x + 1 from default
[REPORTS]
msg-template='[{msg_id} {symbol}] {msg} File: {path}, line {line}, in {obj}'
[BASIC]
# In order to make a check more strict add proper regex http://pylint-messages.wikidot.com/messages:c0103
argument-rgx=.*
attr-rgx=.*
class-rgx=.*
const-rgx=.*
function-rgx=.*
method-rgx=.*
module-rgx=.*
variable-rgx=.*
inlinevar-rgx=.*

View File

@ -0,0 +1,4 @@
rules:
indentation:
spaces: 2
indent-sequences: false

View File

@ -0,0 +1,108 @@
=====================================
LVM Snapshot Linux Role Release Notes
=====================================
.. contents:: Topics
v2.0.1
======
Minor Changes
-------------
- Add publish to Automation Hub to release workflow
Bugfixes
--------
- Fix release workflow prechecks
v2.0.0
======
Minor Changes
-------------
- bigboot - Rename internal variables with role name prefix
- initramfs - Rename internal variables with role name prefix
- shrink_lv - Rename internal variables with role name prefix
Breaking Changes / Porting Guide
--------------------------------
- Split lvm_snapshots role into snapshot_create, snapshot_revert and snapshot_remove
v1.1.2
======
Minor Changes
-------------
- Updated links in docs and workflows to reflect move to redhat-cop org
v1.1.1
======
Bugfixes
--------
- Fix "Failed to list block device properties" error
- Fix dracut path
v1.1.0
======
Major Changes
-------------
- New role, bigboot, to increase the boot partition while moving, and shrinking if needed, the adjacent partition
- New role, initramfs, to execute an atomic flow of building and using a temporary initramfs in a reboot and restoring the original one
- New role, shrink_lv, to decrease logical volume size along with the filesystem
v1.0.3
======
Minor Changes
-------------
- Changed the lvm_snapshots_boot_backup var default to false
- Removed unimplemented lvm_snapshots_use_boom var from the docs
- Revert - wait for snapshot to drain before returning
Bugfixes
--------
- Add task to ensure tar package is present
- Grub needs reinstall if /boot is on LVM
- Wrong kernel version booting after rolling back
v1.0.2
======
Minor Changes
-------------
- Create snapshots with normalized sizes
Bugfixes
--------
- Existing Snapshots with Different Name Cause verify_no_existing_snapshot.yml to Fail
v1.0.1
======
Major Changes
-------------
- Initial MVP release
Minor Changes
-------------
- Add boot backup support
- Add support for checking before resizing logical volumes
v1.0.0
======

View File

@ -0,0 +1,3 @@
@swapdisk
@ygalblum
@heatmiser

View File

@ -0,0 +1,5 @@
# Contributing
Thank you for your interest in contributing to the LVM Snapshots Collection. All we ask is that contributors please observe the [Ansible Community Guidelines](https://docs.ansible.com/ansible/devel/community/index.html) and follow the [Ansible Collections Contributor Guide](https://docs.ansible.com/ansible/devel/community/contributions_collections.html). We look forward to reviewing your pull request.
Everyone is invited to participate. We welcome first timers as well as experienced open source contributors. If you are unsure how to get started with your contribution, open a [new issue](https://github.com/redhat-cop/infra.lvm_snapshots/issues/new/choose) explaining what you want to do and we'll do our best to help!

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 Red Hat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,84 @@
# LVM Snapshots Collection
[![Ansible Lint](https://github.com/redhat-cop/infra.lvm_snapshots/workflows/Ansible%20Lint/badge.svg?event=push)](https://github.com/redhat-cop/infra.lvm_snapshots/actions) [![PyLint](https://github.com/redhat-cop/infra.lvm_snapshots/workflows/PyLint/badge.svg?event=push)](https://github.com/redhat-cop/infra.lvm_snapshots/actions) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8141/badge)](https://www.bestpractices.dev/projects/8141)
## Overview
A reliable snapshot/rollback capability is a key feature required to enable the success of RHEL In-place Upgrade automation solutions. Without it, users will be wary of using the solution because of the potential risk that their applications may not function properly after the OS upgrade. Including automation so that snapshot creation happens right before the OS upgrade reduces this risk. If there are any application issues uncovered after the OS upgrade, a rollback playbook can be executed to instantly revert the environment back to the original state as it was before the upgrade. Application teams will no longer have an excuse not to use in-place upgrades to bring their RHEL estate into compliance.
## Requirements
### Ansible Version
This collection requires ansible from version `2.14.0` and above
### Collections
This collection depends on the following collections
```yaml
- "community.general": "*"
```
## Roles
These are the roles included in the collection. Follow the links below to see the detailed documentation and example playbooks for each role.
- [`snapshot_create`](./roles/snapshot_create/) - controls the creation for a defined set of LVM snapshot volumes
- [`snapshot_remove`](./roles/snapshot_remove/) - used to remove snapshots previously created using the `snapshot_create` role
- [`snapshot_revert`](./roles/snapshot_revert/) - used to revert to snapshots previously created using the `snapshot_create` role
- [`shrink_lv`](./roles/shrink_lv/) - controls decreasing logical volume size along with the filesystem
- [`bigboot`](./roles/bigboot/) - controls increasing of the boot partition while moving, and shrinking if needed, the adjacent partition
- [`initramfs`](./roles/initramfs/) - controls the atomic flow of building and using a temporary initramfs in a reboot and restoring the original one
## Installing the collection from Ansible Galaxy
Before using this collection, you need to install it with the Ansible Galaxy command-line tool:
```bash
ansible-galaxy collection install infra.lvm_snapshots
```
You can also include it in a `requirements.yml` file and install it with `ansible-galaxy collection install -r requirements.yml`, using the format:
```yaml
---
collections:
- name: infra.lvm_snapshots
```
Note that if you install the collection from Ansible Galaxy, it will not be upgraded automatically when you upgrade the `ansible` package. To upgrade the collection to the latest available version, run the following command:
```bash
ansible-galaxy collection install infra.lvm_snapshots --upgrade
```
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax to install version `1.0.0`:
```bash
ansible-galaxy collection install infra.lvm_snapshots:==1.0.0
```
See [Using Ansible collections](https://docs.ansible.com/ansible/devel/user_guide/collections_using.html) for more details.
## Contributing
We appreciate participation from any new contributors. Get started by opening an issue or pull request. Refer to our [contribution guide](CONTRIBUTING.md) for more information.
## Reporting issues
Please open a [new issue](https://github.com/redhat-cop/infra.lvm_snapshots/issues/new/choose) for any bugs or security vulnerabilities you may encounter. We also invite you to open an issue if you have ideas on how we can improve the solution or want to make a suggestion for enhancement.
## More information
This collection is just one building block of our larger initiative to make RHEL in-place upgrade automation that works at enterprise scale. Learn more about our end-to-end approach for automating RHEL in-place upgrades at this [blog post](https://red.ht/bobblog).
## Release notes
See the [changelog](https://github.com/redhat-cop/infra.lvm_snapshots/tree/main/CHANGELOG.rst).
## Licensing
MIT
See [LICENSE](LICENSE) to see the full text.

View File

@ -0,0 +1,19 @@
objects:
role: {}
plugins:
become: {}
cache: {}
callback: {}
cliconf: {}
connection: {}
filter: {}
httpapi: {}
inventory: {}
lookup: {}
module: {}
netconf: {}
shell: {}
strategy: {}
test: {}
vars: {}
version: 2.0.1

View File

@ -0,0 +1,95 @@
ancestor: null
releases:
1.0.0:
release_date: '2023-08-03'
1.0.1:
changes:
major_changes:
- Initial MVP release
minor_changes:
- Add boot backup support
- Add support for checking before resizing logical volumes
fragments:
- boot_backup.yml
- check_before_resize.yml
- release.yml
release_date: '2023-08-04'
1.0.2:
changes:
bugfixes:
- Existing Snapshots with Different Name Cause verify_no_existing_snapshot.yml
to Fail
minor_changes:
- Create snapshots with normalized sizes
fragments:
- create_snapshots_with_normalized_sizes.yml
- filtering_by_lvname_on_existing_snapshot_check.yml
release_date: '2023-08-31'
1.0.3:
changes:
bugfixes:
- Add task to ensure tar package is present
- Grub needs reinstall if /boot is on LVM
- Wrong kernel version booting after rolling back
minor_changes:
- Changed the lvm_snapshots_boot_backup var default to false
- Removed unimplemented lvm_snapshots_use_boom var from the docs
- Revert - wait for snapshot to drain before returning
fragments:
- fix-lvm-grub.yml
- minor-var-changes.yml
- revert-wait-for-drain.yml
- tar-present.yml
- wrong-kernel.yml
release_date: '2023-11-29'
1.1.0:
changes:
major_changes:
- New role, bigboot, to increase the boot partition while moving, and shrinking
if needed, the adjacent partition
- New role, initramfs, to execute an atomic flow of building and using a temporary
initramfs in a reboot and restoring the original one
- New role, shrink_lv, to decrease logical volume size along with the filesystem
fragments:
- add-bigboot-role.yml
- add-shrink-lv.yml
release_date: '2023-11-30'
1.1.1:
changes:
bugfixes:
- Fix "Failed to list block device properties" error
- Fix dracut path
fragments:
- fix_entries.yml
- no_sbin_dracut.yml
release_date: '2023-12-05'
1.1.2:
changes:
minor_changes:
- Updated links in docs and workflows to reflect move to redhat-cop org
fragments:
- update_links.yml
release_date: '2023-12-13'
2.0.0:
changes:
breaking_changes:
- Split lvm_snapshots role into snapshot_create, snapshot_revert and snapshot_remove
minor_changes:
- bigboot - Rename internal variables with role name prefix
- initramfs - Rename internal variables with role name prefix
- shrink_lv - Rename internal variables with role name prefix
fragments:
- bigboot-internal-variable-names.yml
- initramfs-internal-variable-names.yml
- shrinklv-internal-variable-names.yml
- split-lvm-snapshot_role.yml
release_date: '2024-01-10'
2.0.1:
changes:
bugfixes:
- Fix release workflow prechecks
minor_changes:
- Add publish to Automation Hub to release workflow
fragments:
- automation_hub_release.yml
release_date: '2024-01-11'

View File

@ -0,0 +1,32 @@
changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
ignore_other_fragment_extensions: true
keep_fragments: false
mention_ancestor: true
new_plugins_after_name: removed_features
notesdir: fragments
prelude_section_name: release_summary
prelude_section_title: Release Summary
sanitize_changelog: true
sections:
- - major_changes
- Major Changes
- - minor_changes
- Minor Changes
- - breaking_changes
- Breaking Changes / Porting Guide
- - deprecated_features
- Deprecated Features
- - removed_features
- Removed Features (previously deprecated)
- - security_fixes
- Security Fixes
- - bugfixes
- Bugfixes
- - known_issues
- Known Issues
title: LVM Snapshot Linux Role
trivial_section_name: trivial
use_fqcn: true

View File

@ -0,0 +1,6 @@
minor_changes:
- Add bigboot progress messages so impatient operators don't think their server is hung
bugfixes:
- Shorten bigboot.sh usage help message to not exceed the kmsg buffer
- Fix bigboot device not found error
- Fix vgs not found error

View File

@ -0,0 +1,31 @@
---
namespace: infra
name: lvm_snapshots
version: 2.0.1
readme: README.md
authors:
- Ygal Blum <yblum@redhat.com>
- Bob Mader <bob@redhat.com>
description: Ansible role for creating and rolling back LVM snapshots
license_file: LICENSE
tags:
- ansible
- lvm
- storage
dependencies:
"community.general": "*"
repository: https://github.com/redhat-cop/infra.lvm_snapshots
documentation: https://github.com/redhat-cop/infra.lvm_snapshots
homepage: https://github.com/redhat-cop/infra.lvm_snapshots
issues: https://github.com/redhat-cop/infra.lvm_snapshots/issues
build_ignore:
- .ansible-lint
- .pylintrc
- .yamllint
- .git
- .gitignore
- ansible.cfg
- requirements.yml
- tests
- vendor
...

View File

@ -0,0 +1,2 @@
---
requires_ansible: '>=2.14.0'

View File

@ -0,0 +1,4 @@
---
collections:
- name: community.general
...

View File

@ -0,0 +1,42 @@
# bigboot
The `bigboot` role is used to increase boot partition.
The role is designed to support the automation of RHEL in-place upgrades, but can also be used for other purposes.
## Contents
The role contains the shell scripts to increase the size of the boot partition, as well as the script wrapping it to run as part of the pre-mount step during the boot process.
Finally, there is a copy of the [`sfdisk`](https://man7.org/linux/man-pages/man8/sfdisk.8.html) binary with version `2.38.1` to ensure the extend script will work regardless of the `util-linux` package installed in the target host.
## Role Variables
### `bigboot_size`
The variable `bigboot_size` sets the target size of the boot partition after the role has completed.
The value can be either in bytes or with optional single letter suffix (1024 bases).
See `Unit options` type `iec` of [`numfmt`](https://man7.org/linux/man-pages/man1/numfmt.1.html)
## Example of a playbook to run the role
The following yaml is an example of a playbook that runs the role against a group of hosts named `rhel` and increasing the size of its boot partition by 1G.
The boot partition is automatically retrieved by the role by identifying the existing mounted partition to `/boot` and passing the information to the script using the `kernel_opts`.
```yaml
- name: Extend boot partition playbook
hosts: all
vars:
bigboot_size: 1G
roles:
- bigboot
```
# Validate execution
The script will add an entry to the kernel messages (`/dev/kmsg`) with success or failure and the time it took to process.
In case of failure, it may also include an error message retrieved from the execution of the script.
A successful execution will look similar to this:
```bash
[root@localhost ~]# dmesg |grep pre-mount
[ 357.163522] [dracut-pre-mount] Boot partition /dev/vda1 successfully increased by 1G (356 seconds)
```

View File

@ -0,0 +1,547 @@
#!/bin/bash
#
# Script to increase the ext4/xfs boot partition in a BIOS system by shifting
# the adjacent partition to the boot partition by the parametrized size. It
# expects the device to have enough free space to shift to the right of the
# adjacent partition, that is towards the end of the device. It only works
# with ext4 and xfs file systems and supports adjacent partitions as primary
# or logical partitions and LVM in the partition.
#
# The parametrized size supports M for MiB and G for GiB. If no units is given,
# it is interpreted as bytes
#
# Usage: bigboot.sh -d=<device_name> -s=<increase_size_with_units> -b=<boot_partition_number> -p=<partition_prefix>
#
# Example
# Given this device partition:
# Number Start End Size Type File system Flags
# 32.3kB 1049kB 1016kB Free Space
# 1 1049kB 11.1GB 11.1GB primary ext4 boot
# 2 11.1GB 32.2GB 21.1GB extended
# 5 11.1GB 32.2GB 21.1GB logical ext4
#
# Running the command:
# $>bigboot.sh -d=/dev/sda -s=1G -b=1
#
# Will increase the boot partition in /dev/sda by 1G and shift the adjacent
# partition in the device by the equal amount.
#
# Number Start End Size Type File system Flags
# 32.3kB 1049kB 1016kB Free Space
# 1 1049kB 12.2GB 12.2GB primary ext4 boot
# 2 12.2GB 32.2GB 20.0GB extended
# 5 12.2GB 32.2GB 20.0GB logical ext4
#
# Command parameters
# Requested boot-partition size increase as given on the CLI (-s); supports
# M/G iec suffixes, interpreted as bytes when no suffix is given.
INCREMENT_BOOT_PARTITION_SIZE=
# Whole-disk device to operate on (-d), e.g. /dev/sda
DEVICE_NAME=
# Partition number of the boot partition on DEVICE_NAME (-b)
BOOT_PARTITION_NUMBER=
# Separator between device name and partition number (-p), e.g. "p" for nvme
PARTITION_PREFIX=
# Script parameters
# Number of the partition immediately after the boot partition (the one that
# will be shrunk and/or shifted right)
ADJACENT_PARTITION_NUMBER=
# Filesystem type of the boot partition; only ext4 and xfs are supported
BOOT_FS_TYPE=
# Marker string parted uses for an extended partition entry
EXTENDED_PARTITION_TYPE=extended
# Device path of the last LV on the adjacent partition's PV (set only when
# the adjacent partition is an LVM PV)
LOGICAL_VOLUME_DEVICE_NAME=
# INCREMENT_BOOT_PARTITION_SIZE converted to a plain byte count
INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES=
# Bytes that must be freed from the adjacent partition (0 when the device
# already has enough free space after it)
SHRINK_SIZE_IN_BYTES=
print_help(){
    # Print the command-line usage summary for this script.
    printf 'Usage: %s -d=<device_name> -s=<increase_size_with_units> -b=<boot_partition_number> -p=<partition_prefix>\n' "$(basename "$0")"
}
get_device_type(){
    # Print the lsblk TYPE (e.g. "part", "lvm") of the given block device;
    # exit with an error message when it cannot be determined.
    local device=$1
    val=$(/usr/bin/lsblk "$device" -o type --noheadings 2>&1)
    local status=$?
    # BUGFIX: was "[[ status -ne 0 ]]" — the bare name only worked through
    # accidental arithmetic evaluation; expand the variable explicitly, as the
    # rest of the script does.
    if [[ $status -ne 0 ]]; then
        echo "Failed to retrieve device type for $device: $val"
        exit 1
    fi
    # lsblk may print one line per nested device; keep the last line, which
    # for a PV partition is the contained "lvm" entry — TODO confirm this
    # holds for deeper stacking (e.g. crypt on LVM).
    type=$(tail -n1 <<<"$val")
    if [[ -z $type ]]; then
        echo "Unknown device type for $device"
        exit 1
    fi
    echo "$type"
}
ensure_device_not_mounted() {
    # Exit with an error if the given device is mounted. For an LVM PV the
    # check covers every logical volume carved from it, since shifting the
    # partition moves all of them.
    local device=$1
    local devices_to_check
    device_type=$(get_device_type "$device")
    if [[ $device_type == "lvm" ]]; then
        # It's an LVM block device
        # Capture the LV device names. Since we'll have to shift the partition, we need to make sure all LVs are not mounted in the adjacent partition.
        devices_to_check=$(/usr/sbin/lvm pvdisplay "$device" -m |/usr/bin/grep "Logical volume" |/usr/bin/awk '{print $3}')
    else
        # Use the device and partition number instead
        devices_to_check=$device
    fi
    for device_name in $devices_to_check; do
        # BUGFIX: was "1>&2>/dev/null", which duplicated stdout onto stderr and
        # then only silenced stdout, letting findmnt's stderr leak; silence
        # both streams — only the exit status matters here.
        /usr/bin/findmnt --source "$device_name" >/dev/null 2>&1
        status=$?
        # BUGFIX: was "[[ status -eq 0 ]]" (bare name); expand the variable.
        if [[ $status -eq 0 ]]; then
            echo "Device $device_name is mounted"
            exit 1
        fi
    done
}
validate_device() {
    # Ensure a device path was supplied, exists on the system, and can be
    # opened by fdisk; exit with a diagnostic message otherwise.
    local device=$1
    if [[ -z "${device}" ]]; then
        echo "Missing device name"
        print_help
        exit 1
    fi
    if [[ ! -e "${device}" ]]; then
        echo "Device ${device} not found"
        exit 1
    fi
    # Probing with fdisk -l verifies the device is a readable block device.
    ret=$(/usr/sbin/fdisk -l "${device}" 2>&1)
    status=$?
    if (( status != 0 )); then
        echo "Failed to open device ${device}: $ret"
        exit 1
    fi
}
validate_increment_partition_size() {
    # Check that a size increase was supplied and is a valid iec-suffixed
    # number; store the converted byte count in
    # INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES.
    if [[ -z "$INCREMENT_BOOT_PARTITION_SIZE" ]]; then
        echo "Missing incremental size for boot partition"
        print_help
        exit 1
    fi
    # numfmt both validates the value and converts it to bytes (1024 bases).
    ret=$(/usr/bin/numfmt --from=iec "$INCREMENT_BOOT_PARTITION_SIZE" 2>&1)
    status=$?
    if (( status != 0 )); then
        echo "Invalid size value for '$INCREMENT_BOOT_PARTITION_SIZE': $ret"
        exit $status
    fi
    INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES=$ret
}
# Parse the command line into the global parameter variables.
# Mandatory: -d/--device, -s/--size, -b/--boot
# Optional:  -p/--prefix (e.g. "p" for nvme based volumes)
parse_flags() {
    local arg
    for arg in "$@"; do
        case $arg in
            -d=*|--device=*) DEVICE_NAME="${arg#*=}" ;;
            -s=*|--size=*)   INCREMENT_BOOT_PARTITION_SIZE="${arg#*=}" ;;
            -b=*|--boot=*)   BOOT_PARTITION_NUMBER="${arg#*=}" ;;
            -p=*|--prefix=*) PARTITION_PREFIX="${arg#*=}" ;;
            -h)
                print_help
                exit 0
                ;;
            *)
                # Any unrecognized flag aborts with usage help.
                echo "Unknown flag $arg"
                print_help
                exit 1
                ;;
        esac
    done
}
validate_parameters() {
# Validate all mandatory CLI parameters and confirm the boot partition
# exists, is not mounted, and carries an extendable (ext4/xfs) filesystem.
validate_device "${DEVICE_NAME}"
validate_increment_partition_size
# Make sure BOOT_PARTITION_NUMBER is set to avoid passing only DEVICE_NAME
if [[ -z "$BOOT_PARTITION_NUMBER" ]]; then
echo "Boot partition number was not set"
print_help
exit 1
fi
# The boot partition device path = device + optional prefix + number
# (e.g. /dev/sda1 or /dev/nvme0n1p1).
validate_device "${DEVICE_NAME}${PARTITION_PREFIX}${BOOT_PARTITION_NUMBER}"
ensure_device_not_mounted "${DEVICE_NAME}${PARTITION_PREFIX}${BOOT_PARTITION_NUMBER}"
ensure_extendable_fs_type "${DEVICE_NAME}${PARTITION_PREFIX}${BOOT_PARTITION_NUMBER}"
}
get_fs_type(){
    # Print the filesystem type of the given device (blkid's ID_FS_TYPE
    # value, e.g. "ext4", "xfs", "swap"); exit with blkid's status on failure.
    local device=$1
    local blkid_out
    # BUGFIX: the status check used to read $? after the sed stage of a
    # pipeline, so it always saw sed's (successful) exit code and blkid
    # failures went undetected; run blkid on its own and check it directly.
    blkid_out=$(/usr/sbin/blkid "$device" -o udev 2>&1)
    status=$?
    if [[ $status -ne 0 ]]; then
        exit $status
    fi
    # Keep only the ID_FS_TYPE line, stripped of its key prefix.
    ret=$(echo "$blkid_out" | sed -n -e 's/ID_FS_TYPE=//p')
    echo "$ret"
}
ensure_extendable_fs_type(){
    # Verify the boot filesystem is one this script can grow (ext4 or xfs)
    # and remember it in BOOT_FS_TYPE; abort otherwise.
    local device=$1
    ret=$(get_fs_type "$device")
    case "$ret" in
        ext4|xfs)
            BOOT_FS_TYPE=$ret
            ;;
        *)
            echo "Boot file system type $ret is not extendable"
            exit 1
            ;;
    esac
}
get_successive_partition_number() {
# Locate the partition that immediately follows the boot partition — the one
# that must shrink and shift right — and store its number in
# ADJACENT_PARTITION_NUMBER. Also records EXTENDED_PARTITION_NUMBER when the
# disk has an extended partition, since that container must shrink too.
# sed prints the line number ("=") of the boot partition's entry in parted's
# machine-readable output; used only as an existence check.
boot_line_number=$(/usr/sbin/parted -m "$DEVICE_NAME" print |/usr/bin/sed -n '/^'"$BOOT_PARTITION_NUMBER"':/ {=}')
status=$?
# NOTE(review): $? here is the sed stage's exit code, so a parted failure is
# in practice only caught by the empty-output check below.
if [[ $status -ne 0 ]]; then
echo "Unable to identify boot partition number for '$DEVICE_NAME'"
exit $status
fi
if [[ -z "$boot_line_number" ]]; then
echo "No boot partition found"
exit 1
fi
# get the extended partition number in case there is one, we will need to shrink it as well
EXTENDED_PARTITION_NUMBER=$(/usr/sbin/parted "$DEVICE_NAME" print | /usr/bin/sed -n '/'"$EXTENDED_PARTITION_TYPE"'/p'|awk '{print $1}')
if [[ -n "$EXTENDED_PARTITION_NUMBER" ]]; then
# if there's an extended partition, use the last one as the target partition to shrink
ADJACENT_PARTITION_NUMBER=$(/usr/sbin/parted "$DEVICE_NAME" print |grep -v "^$" |awk 'END{print$1}')
else
# get the partition number from the next line after the boot partition
ADJACENT_PARTITION_NUMBER=$(/usr/sbin/parted -m "$DEVICE_NAME" print | /usr/bin/awk -F ':' '/'"^$BOOT_PARTITION_NUMBER:"'/{getline;print $1}')
fi
# Sanity check: the extracted value must be purely numeric (extglob pattern,
# valid inside [[ ]] without shopt).
if ! [[ $ADJACENT_PARTITION_NUMBER == +([[:digit:]]) ]]; then
echo "Invalid successive partition number '$ADJACENT_PARTITION_NUMBER'"
exit 1
fi
ensure_device_not_mounted "${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
}
init_variables(){
# Initialization entry point: parse CLI flags, validate them, and locate the
# partition adjacent to the boot partition.
parse_flags "$@"
validate_parameters
get_successive_partition_number
}
check_filesystem(){
    # Run a consistency check on the given device's filesystem. Swap cannot
    # be checked, and only ext4 gets an e2fsck pass (xfs has no offline fsck
    # equivalent here). Failures are reported as warnings, not fatal errors.
    local device=$1
    fstype=$(get_fs_type "$device")
    if [[ "$fstype" == "swap" ]]; then
        echo "Warning: cannot run fsck to a swap partition for $device"
        return 0
    fi
    if [[ "$BOOT_FS_TYPE" == "ext4" ]]; then
        # Full check with automatic repair (-f -y). (The old comment claimed
        # this retrieved a minimum shrink size — it does not; it is a fsck.)
        ret=$(/usr/sbin/e2fsck -fy "$device" 2>&1)
        local status=$?
        # BUGFIX: was "[[ status -ne 0 ]]" (bare name); expand the variable.
        if [[ $status -ne 0 ]]; then
            echo "Warning: File system check failed for $device: $ret"
        fi
    fi
}
convert_size_to_fs_blocks(){
    # Convert a byte count into a number of filesystem blocks for the ext
    # filesystem on the given device (integer division, rounds down).
    local device=$1
    local size=$2
    local fs_block_size
    fs_block_size=$(/usr/sbin/tune2fs -l "$device" | /usr/bin/awk '/Block size:/{print $3}')
    echo $(( size / fs_block_size ))
}
calculate_expected_resized_file_system_size_in_blocks(){
    # Print the block count the device's filesystem should shrink to so the
    # boot partition can grow by INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES.
    local device=$1
    local shrink_blocks current_blocks
    shrink_blocks=$(convert_size_to_fs_blocks "$device" "$INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES")
    current_blocks=$(/usr/sbin/tune2fs -l "$device" | /usr/bin/awk '/Block count:/{print $3}')
    echo $(( current_blocks - shrink_blocks ))
}
get_free_device_size() {
# Print the number of free bytes immediately after the adjacent partition.
# The pipeline takes the parted machine-readable line following the adjacent
# partition's entry, keeps it only if it is a "free" slot, extracts the size
# field, and strips the trailing "B" unit suffix. Empty output means no free
# slot follows the partition.
free_space=$(/usr/sbin/parted -m "$DEVICE_NAME" unit b print free | /usr/bin/awk -F':' '/'"^$ADJACENT_PARTITION_NUMBER:"'/{getline;print $0}'|awk -F':' '/free/{print $4}'|sed -e 's/B//g')
echo "$free_space"
}
get_volume_group_name(){
    # Print the name of the volume group whose PV sits on the adjacent
    # partition.
    local volume_group_name
    # BUGFIX: the status check used to read $? after the sed stage of a
    # pipeline, so it always saw sed's (successful) exit code and pvs
    # failures were silently ignored; run pvs on its own, check its status,
    # then strip the leading whitespace from its output.
    ret=$(/usr/sbin/lvm pvs "${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}" -o vg_name --noheadings 2>&1)
    status=$?
    if [[ $status -ne 0 ]]; then
        echo "Failed to retrieve volume group name for logical volume $LOGICAL_VOLUME_DEVICE_NAME: $ret"
        exit $status
    fi
    echo "$ret" | /usr/bin/sed 's/^[[:space:]]*//g'
}
deactivate_volume_group(){
    # Deactivate the VG on the adjacent partition so the kernel releases its
    # LV device nodes before the partition table is re-read.
    local volume_group_name
    volume_group_name=$(get_volume_group_name)
    ret=$(/usr/sbin/lvm vgchange -an "$volume_group_name" 2>&1)
    status=$?
    if (( status != 0 )); then
        echo "Failed to deactivate volume group $volume_group_name: $ret"
        exit $status
    fi
    # avoid potential deadlocks with udev rules before continuing
    sleep 1
}
check_available_free_space(){
    # Work out how many bytes must be freed from the adjacent partition and
    # store the result in SHRINK_SIZE_IN_BYTES (0 when the device already has
    # a big enough free gap after it). For LVM partitions, also verify the VG
    # has enough free physical extents to give up.
    local device="${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
    free_device_space_in_bytes=$(get_free_device_size)
    # if there is enough free space after the adjacent partition, there is no need to shrink it.
    if [[ $free_device_space_in_bytes -gt $INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES ]]; then
        SHRINK_SIZE_IN_BYTES=0
        return
    fi
    SHRINK_SIZE_IN_BYTES=$((INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES-free_device_space_in_bytes))
    device_type=$(get_device_type "$device")
    if [[ "$device_type" == "lvm" ]]; then
        # there is not enough free space after the adjacent partition; calculate
        # how much extra space needs to be freed from the PV
        local volume_group_name
        volume_group_name=$(get_volume_group_name)
        pe_size_in_bytes=$(/usr/sbin/lvm pvdisplay "$device" --units b| /usr/bin/awk 'index($0,"PE Size") {print $3}')
        unusable_space_in_pv_in_bytes=$(/usr/sbin/lvm pvdisplay --units B "$device" | /usr/bin/awk 'index($0,"not usable") {print $(NF-1)}'|/usr/bin/numfmt --from=iec)
        total_pe_count_in_vg=$(/usr/sbin/lvm vgs "$volume_group_name" -o pv_pe_count --noheadings)
        # BUGFIX: the path was "usr/sbin/lvm" (missing leading slash), so this
        # command always failed and the allocated PE count came back empty.
        allocated_pe_count_in_vg=$(/usr/sbin/lvm vgs "$volume_group_name" -o pv_pe_alloc_count --noheadings)
        free_pe_count=$((total_pe_count_in_vg - allocated_pe_count_in_vg))
        # factor in the unusable space to match the required number of free PEs
        required_pe_count=$(((SHRINK_SIZE_IN_BYTES+unusable_space_in_pv_in_bytes)/pe_size_in_bytes))
        if [[ $required_pe_count -gt $free_pe_count ]]; then
            echo "Not enough available free PE in VG $volume_group_name: Required $required_pe_count but found $free_pe_count"
            exit 1
        fi
    fi
}
resolve_device_name(){
    # When the adjacent partition is an LVM PV, find the device name of the
    # last LV laid out on it and record it in LOGICAL_VOLUME_DEVICE_NAME
    # (that LV's filesystem gets checked after the PV is shrunk/moved).
    local device="${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
    device_type=$(get_device_type "$device")
    if [[ $device_type == "lvm" ]]; then
        # It's an LVM block device
        # Determine which is the last LV in the PV: hold each "Logical volume"
        # line, print the held (i.e. last) one at end of input.
        # shellcheck disable=SC2016
        device=$(/usr/sbin/lvm pvdisplay "$device" -m | /usr/bin/sed -n '/Logical volume/h; ${x;p;}' | /usr/bin/awk '{print $3}')
        status=$?
        # BUGFIX: was "[[ status -ne 0 ]]" (bare name); expand the variable.
        if [[ $status -ne 0 ]]; then
            echo "Failed to identify the last LV in $device"
            exit $status
        fi
        # Capture the LV device name
        LOGICAL_VOLUME_DEVICE_NAME=$device
    fi
}
check_device(){
# Pre-flight checks for the adjacent partition: resolve the last LV on it
# (when it is an LVM PV), make sure nothing on it is mounted, and compute
# how much it must shrink.
local device="${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
resolve_device_name
ensure_device_not_mounted "$device"
check_available_free_space
}
evict_end_PV() {
# Move every physical extent allocated from shrinking_start_PE to the end of
# the PV into free space elsewhere on it ("--alloc anywhere"), so the tail
# of the PV can be cut off. Used as a recovery path when pvresize refuses
# because allocated extents sit inside the area being removed.
local device="${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
local shrinking_start_PE=$1
ret=$(/usr/sbin/lvm pvmove --alloc anywhere "$device":"$shrinking_start_PE"- 2>&1)
status=$?
if [[ $status -ne 0 ]]; then
echo "Failed to move PEs in PV $LOGICAL_VOLUME_DEVICE_NAME: $ret"
exit $status
fi
# Confirm the filesystem on the last LV survived the extent move.
check_filesystem "$LOGICAL_VOLUME_DEVICE_NAME"
}
shrink_physical_volume() {
# Shrink the LVM PV on the adjacent partition by SHRINK_SIZE_IN_BYTES worth
# of physical extents, then fsck the last LV on it.
local device="${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
# PE size and the PV space "not usable" (lost to alignment/metadata) are
# needed to translate the byte shrink size into a new PV size.
pe_size_in_bytes=$(/usr/sbin/lvm pvdisplay "$device" --units b| /usr/bin/awk 'index($0,"PE Size") {print $3}')
unusable_space_in_pv_in_bytes=$(/usr/sbin/lvm pvdisplay --units B "$device" | /usr/bin/awk 'index($0,"not usable") {print $(NF-1)}'|/usr/bin/numfmt --from=iec)
total_pe_count=$(/usr/sbin/lvm pvs "$device" -o pv_pe_count --noheadings | /usr/bin/sed 's/^[[:space:]]*//g')
# Number of trailing PEs to give up, the first PE of that range, and the
# resulting PV size in bytes.
evict_size_in_PE=$((SHRINK_SIZE_IN_BYTES/pe_size_in_bytes))
shrink_start_PE=$((total_pe_count - evict_size_in_PE))
pv_new_size_in_bytes=$(( (shrink_start_PE*pe_size_in_bytes) + unusable_space_in_pv_in_bytes ))
# Dry run first (-t): if it fails with errno 5, allocated extents likely sit
# in the area being removed, so evict them and fall through to the real run.
ret=$(/usr/sbin/lvm pvresize --setphysicalvolumesize "$pv_new_size_in_bytes"B -t "$device" -y 2>&1)
status=$?
if [[ $status -ne 0 ]]; then
if [[ $status -eq 5 ]]; then
# ERRNO 5 is equivalent to command failed: https://github.com/lvmteam/lvm2/blob/2eb34edeba8ffc9e22b6533e9cb20e0b5e93606b/tools/errors.h#L23
# Try to recover by evicting the ending PEs elsewhere in the PV, in case it's a failure due to ending PE's being inside the shrinking area.
evict_end_PV $shrink_start_PE
else
echo "Failed to resize PV $device: $ret"
exit $status
fi
fi
# Real resize (no -t this time).
echo "Shrinking PV $device to $pv_new_size_in_bytes bytes" >&2
ret=$(/usr/sbin/lvm pvresize --setphysicalvolumesize "$pv_new_size_in_bytes"B "$device" -y 2>&1)
status=$?
if [[ $status -ne 0 ]]; then
echo "Failed to resize PV $device during retry: $ret"
exit $status
fi
check_filesystem "$LOGICAL_VOLUME_DEVICE_NAME"
}
calculate_new_end_partition_size_in_bytes(){
    # Print the target size (bytes) for the given partition after removing
    # SHRINK_SIZE_IN_BYTES from it.
    local partition_number=$1
    local device="${DEVICE_NAME}${PARTITION_PREFIX}${partition_number}"
    # Current size: third field of parted's machine-readable entry for this
    # partition, with the trailing "B" unit stripped.
    current_partition_size_in_bytes=$(/usr/sbin/parted -m "$DEVICE_NAME" unit b print| /usr/bin/awk '/^'"$partition_number"':/ {split($0,value,":"); print value[3]}'| /usr/bin/sed -e's/B//g')
    # BUGFIX: the old check read $? after the sed stage of the pipeline
    # (always 0) and printed an unset $ret in a message that also wrongly
    # mentioned megabytes; validate the parsed value is numeric instead.
    if ! [[ $current_partition_size_in_bytes == +([[:digit:]]) ]]; then
        echo "Failed to determine current size of partition $device"
        exit 1
    fi
    new_partition_size_in_bytes=$(( current_partition_size_in_bytes - SHRINK_SIZE_IN_BYTES))
    echo "$new_partition_size_in_bytes"
}
shrink_partition() {
    # Resize the given partition down to its computed new end size, feeding
    # parted its interactive confirmation ("Yes") via ---pretend-input-tty.
    local partition_number=$1
    new_end_partition_size_in_bytes=$(calculate_new_end_partition_size_in_bytes "$partition_number")
    echo "Shrinking partition $partition_number in $DEVICE_NAME" >&2
    ret=$(echo Yes | /usr/sbin/parted "$DEVICE_NAME" ---pretend-input-tty unit B resizepart "$partition_number" "$new_end_partition_size_in_bytes" 2>&1 )
    status=$?
    if (( status != 0 )); then
        echo "Failed to resize device $DEVICE_NAME$partition_number to size: $ret"
        exit 1
    fi
}
shrink_adjacent_partition(){
    # Free up the room the boot partition needs: shrink the PV first (when
    # the adjacent partition is LVM), then the partition itself, and finally
    # the enclosing extended partition when one exists.
    if [[ $SHRINK_SIZE_IN_BYTES -eq 0 ]]; then
        # The device already has enough free space after the partition.
        return 0
    fi
    local device_type
    device_type=$(get_device_type "${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}")
    if [[ "$device_type" == "lvm" ]]; then
        shrink_physical_volume
    fi
    shrink_partition "$ADJACENT_PARTITION_NUMBER"
    if [[ -n "$EXTENDED_PARTITION_NUMBER" ]]; then
        # A logical partition lives inside the extended one, so the container
        # must shrink as well.
        shrink_partition "$EXTENDED_PARTITION_NUMBER"
    fi
}
# Move the partition adjacent to /boot (or its extended container, when one
# exists) to the right by INCREMENT_BOOT_PARTITION_SIZE using sfdisk --move-data.
# The astute eye will notice that we're moving the partition, not the last
# logical volume in the partition. Exits with sfdisk's status on failure.
shift_adjacent_partition() {
    local target_partition=$ADJACENT_PARTITION_NUMBER
    if [[ -n "$EXTENDED_PARTITION_NUMBER" ]]; then
        # logical partitions are moved through their extended partition
        target_partition=$EXTENDED_PARTITION_NUMBER
    fi
    # Background watcher: while an sfdisk process exists, periodically log its
    # CPU time so the operator knows the (potentially long) data move is alive.
    ( sleep 4
    while t="$(ps -C sfdisk -o cputime=)"; do
    echo "Bigboot partition move is progressing, please wait! ($t)" >&2
    sleep 120
    done ) &
    echo "Moving up partition $target_partition in $DEVICE_NAME by $INCREMENT_BOOT_PARTITION_SIZE" >&2
    ret=$(echo "+$INCREMENT_BOOT_PARTITION_SIZE,"| /usr/sbin/sfdisk --move-data "$DEVICE_NAME" -N "$target_partition" --force 2>&1)
    status=$?
    # Test "$status" explicitly; the original relied on bash's arithmetic
    # expansion of the bare word "status" inside [[ ]], unlike the rest of the file.
    if [[ $status -ne 0 ]]; then
        echo "Failed to shift partition '$DEVICE_NAME$target_partition': $ret"
        exit $status
    fi
}
# Re-sync the kernel's view of the partition table after the resize/move.
# For an LVM partition, first realign the PV size with the partition size and
# deactivate the VG so partprobe can refresh the kernel partition table, then
# reactivate it. Exits with pvresize's status on failure.
update_kernel_partition_tables(){
    # Ensure no size inconsistencies between PV and partition
    local device="${DEVICE_NAME}${PARTITION_PREFIX}${ADJACENT_PARTITION_NUMBER}"
    local device_type
    device_type=$(get_device_type "$device")
    if [[ $device_type == "lvm" ]]; then
        ret=$(/usr/sbin/lvm pvresize "$device" -y 2>&1)
        status=$?
        # use "$status"; the original tested the bare word "status"
        if [[ $status -ne 0 ]]; then
            echo "Failed to align PV and partition sizes '$device': $ret"
            exit $status
        fi
        # ensure that the VG is not active so that the changes to the kernel
        # partition table are reflected by the partprobe command
        deactivate_volume_group
    fi
    /usr/sbin/partprobe "$DEVICE_NAME" 2>&1
    if [[ $device_type == "lvm" ]]; then
        # reactivate volume group
        activate_volume_group
    fi
}
# Grow the boot partition by INCREMENT_BOOT_PARTITION_SIZE, then grow the file
# system (ext4 or xfs) inside it to fill the enlarged partition.
# Logs and returns (without exiting) on failure so cleanup can still run.
increase_boot_partition() {
    local device="${DEVICE_NAME}${PARTITION_PREFIX}${BOOT_PARTITION_NUMBER}"
    local new_fs_size_in_blocks=
    echo "Increasing boot partition $BOOT_PARTITION_NUMBER in $DEVICE_NAME by $INCREMENT_BOOT_PARTITION_SIZE" >&2
    # "- +" asks sfdisk to keep the start sector and extend the partition to
    # the maximum available size.
    ret=$(echo "- +"| /usr/sbin/sfdisk "$DEVICE_NAME" -N "$BOOT_PARTITION_NUMBER" --no-reread --force 2>&1)
    status=$?
    if [[ $status -ne 0 ]]; then
        echo "Failed to shift boot partition '$device': $ret"
        return
    fi
    update_kernel_partition_tables
    # Extend the boot file system to occupy the newly added space.
    if [[ "$BOOT_FS_TYPE" == "ext4" ]]; then
        check_filesystem "$device"
        increment_boot_partition_in_blocks=$(convert_size_to_fs_blocks "$device" "$INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES")
        total_block_count=$(/usr/sbin/tune2fs -l "$device" | /usr/bin/awk '/Block count:/{print $3}')
        new_fs_size_in_blocks=$(( total_block_count + increment_boot_partition_in_blocks ))
        ret=$(/usr/sbin/resize2fs "$device" $new_fs_size_in_blocks 2>&1)
        status=$?
    elif [[ "$BOOT_FS_TYPE" == "xfs" ]]; then
        block_size_in_bytes=$(/usr/sbin/xfs_db "$device" -c "sb" -c "print blocksize" |/usr/bin/awk '{print $3}')
        current_blocks_in_data=$(/usr/sbin/xfs_db "$device" -c "sb" -c "print dblocks" |/usr/bin/awk '{print $3}')
        increment_boot_partition_in_blocks=$((INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES/block_size_in_bytes))
        new_fs_size_in_blocks=$((current_blocks_in_data + increment_boot_partition_in_blocks))
        # xfs_growfs requires the file system to be mounted in order to change
        # its size, so mount it on a temporary directory for the duration.
        tmp_dir=$(/usr/bin/mktemp -d)
        /usr/bin/mount "$device" "$tmp_dir"
        ret=$(/usr/sbin/xfs_growfs "$device" -D "$new_fs_size_in_blocks" 2>&1)
        # Capture xfs_growfs's status BEFORE unmounting; the original re-read
        # $? after the if/fi block, which clobbered it with umount's status.
        status=$?
        /usr/bin/umount "$device"
    else
        echo "Device $device does not contain an ext4 or xfs file system: $BOOT_FS_TYPE"
        return
    fi
    if [[ $status -ne 0 ]]; then
        echo "Failed to resize boot partition '$device': $ret"
        return
    fi
    echo "Boot file system increased to $new_fs_size_in_blocks blocks" >&2
}
# Re-activate the volume group (looked up via get_volume_group_name) after the
# kernel partition-table refresh. Exits with vgchange's status on failure.
activate_volume_group(){
    # (removed an unused "local device=..." left over in the original)
    local volume_group_name
    volume_group_name=$(get_volume_group_name)
    ret=$(/usr/sbin/lvm vgchange -ay "$volume_group_name" 2>&1)
    status=$?
    if [[ $status -ne 0 ]]; then
        echo "Failed to activate volume group $volume_group_name: $ret"
        exit $status
    fi
    # avoid potential deadlocks with udev rules before continuing
    sleep 1
}
# last steps are to run the fsck on boot partition and activate the volume group if necessary
cleanup(){
# run a file system check to the boot file system
check_filesystem "${DEVICE_NAME}${PARTITION_PREFIX}${BOOT_PARTITION_NUMBER}"
}
# Entry point: parse arguments and validate the device, then shrink the
# adjacent partition, shift it right, and grow /boot into the freed space.
main() {
init_variables "$@"
check_device
shrink_adjacent_partition
shift_adjacent_partition
increase_boot_partition
cleanup
}
main "$@"

View File

@ -0,0 +1,15 @@
#!/bin/bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
# dracut module hook: always include this module when listed explicitly.
check(){
    return 0
}
# Install the binaries and scripts the bigboot dracut module needs inside the
# initramfs image.
install() {
    # NOTE(review): dropped a stray "/usr/" entry present in the original
    # list — it is a directory, not a binary path, and looks like a typo.
    inst_multiple -o /usr/bin/mount /usr/bin/umount /usr/sbin/parted /usr/bin/mktemp /usr/bin/wc /usr/bin/date /usr/bin/sed /usr/bin/awk /usr/bin/basename /usr/sbin/resize2fs /usr/sbin/tune2fs /usr/sbin/partprobe /usr/bin/numfmt /usr/sbin/lvm /usr/bin/lsblk /usr/sbin/e2fsck /usr/sbin/fdisk /usr/bin/findmnt /usr/bin/tail /usr/sbin/xfs_growfs /usr/sbin/xfs_db
    # shellcheck disable=SC2154
    inst_hook pre-mount 99 "$moddir/increase-boot-partition.sh"
    # use the bundled static sfdisk so --move-data is available in initramfs
    inst_binary "$moddir/sfdisk.static" "/usr/sbin/sfdisk"
    inst_simple "$moddir/bigboot.sh" "/usr/bin/bigboot.sh"
}

View File

@ -0,0 +1,20 @@
# Derive the boot disk, partition prefix and partition number from the /boot
# mount entry so they can be passed to the bigboot dracut module.
- name: Find the boot mount entry
  ansible.builtin.set_fact:
    bigboot_boot_mount_entry: "{{ ansible_facts.mounts | selectattr('mount', 'equalto', '/boot') | first }}"
- name: Calculate the partition to look for
  ansible.builtin.set_fact:
    bigboot_boot_partition_name: "{{ (bigboot_boot_mount_entry.device | split('/'))[-1] }}"
- name: Find the boot device parent
  ansible.builtin.set_fact:
    bigboot_boot_disk: "{{ item.key }}"
  with_dict: "{{ ansible_facts.devices }}"
  when: bigboot_boot_partition_name in item.value.partitions
- name: Capture boot device details
  ansible.builtin.set_fact:
    # Strip the full trailing digit run (not just the last character, as the
    # original did) so disks with 10+ partitions such as nvme0n1p12 work.
    bigboot_boot_device_partition_prefix: "{{ (bigboot_boot_partition_name[(bigboot_boot_disk | length):]) | regex_replace('[0-9]+$', '') }}"
    bigboot_boot_partition_number: "{{ bigboot_boot_partition_name | regex_search('[0-9]+$') }}"
    bigboot_boot_device_name: "/dev/{{ bigboot_boot_disk }}"
    bigboot_boot_device_original_size: "{{ bigboot_boot_mount_entry.size_total | int }}"

View File

@ -0,0 +1,66 @@
---
# Main flow for the bigboot role: validate input, stage the dracut module,
# rebuild the initramfs and reboot to run the resize, then verify the result.
- name: Make sure the required related facts are available
  ansible.builtin.setup:
    gather_subset:
      - "!all"
      - "!min"
      - mounts
      - devices
- name: Validate bigboot_size is not empty
  ansible.builtin.assert:
    that: bigboot_size | length >0
    fail_msg: "bigboot_size is empty"
- name: Validate initramfs preflight
  ansible.builtin.include_role:
    name: initramfs
    tasks_from: preflight
- name: Get boot device info
  ansible.builtin.include_tasks:
    file: get_boot_device_info.yml
- name: Copy extend boot dracut module
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: /usr/lib/dracut/modules.d/99extend_boot/
    mode: "0554"
  loop:
    - bigboot.sh
    - module-setup.sh
    - sfdisk.static
- name: Resolve and copy the shrink-start script
  # template resolution injects the boot device facts gathered above
  ansible.builtin.template:
    src: increase-boot-partition.sh.j2
    dest: /usr/lib/dracut/modules.d/99extend_boot/increase-boot-partition.sh
    mode: '0554'
- name: Create the initramfs and reboot to run the module
  vars:
    initramfs_add_modules: "extend_boot"
  ansible.builtin.include_role:
    name: initramfs
- name: Remove dracut extend boot module
  ansible.builtin.file:
    path: /usr/lib/dracut/modules.d/99extend_boot
    state: absent
- name: Retrieve mount points
  # re-gather mounts to observe the post-reboot /boot size
  ansible.builtin.setup:
    gather_subset:
      - "!all"
      - "!min"
      - mounts
- name: Capture boot device new size
  ansible.builtin.set_fact:
    bigboot_boot_device_new_size: "{{ (ansible_facts.mounts | selectattr('mount', 'equalto', '/boot') | first).size_total | int }}"
- name: Validate boot partition new size
  # NOTE(review): this only checks that the size changed, not that it grew by
  # bigboot_size — confirm whether a stronger check is wanted
  ansible.builtin.assert:
    that:
      - bigboot_boot_device_new_size != bigboot_boot_device_original_size
    fail_msg: "Boot partition size '{{ bigboot_boot_device_new_size }}' did not change"

View File

@ -0,0 +1,31 @@
#!/bin/bash
# Rewrite /etc/lvm/lvm.conf inside the running initramfs so locking_type is 1.
# NOTE(review): despite the name, this sets file-based locking (locking_type=1)
# rather than disabling locking entirely — confirm intent.
disable_lvm_lock(){
    tmpfile=$(/usr/bin/mktemp)
    sed -e 's/\(^[[:space:]]*\)locking_type[[:space:]]*=[[:space:]]*[[:digit:]]/\1locking_type = 1/' /etc/lvm/lvm.conf >"$tmpfile"
    status=$?
    # test "$status" explicitly; the original used the bare word "status"
    if [[ $status -ne 0 ]]; then
        echo "Failed to disable lvm lock: $status" >/dev/kmsg
        exit 1
    fi
    # replace lvm.conf. There is no need to keep a backup since it's an ephemeral file, we are not replacing the original in the initramfs image file
    mv "$tmpfile" /etc/lvm/lvm.conf
}
# Entry point for the dracut pre-mount hook: relax LVM locking, run bigboot.sh
# with the device facts templated in by Ansible, and log the outcome with the
# elapsed time to the kernel log ring buffer.
main() {
name=$(basename "$0")
start=$(/usr/bin/date +%s)
disable_lvm_lock
# run bigboot.sh to increase boot partition and file system size
ret=$(sh /usr/bin/bigboot.sh -d="{{ bigboot_boot_device_name }}" -s="{{ bigboot_size }}" -b="{{ bigboot_boot_partition_number }}" -p="{{ bigboot_boot_device_partition_prefix }}" 2>/dev/kmsg)
status=$?
end=$(/usr/bin/date +%s)
# write the log file
if [[ $status -eq 0 ]]; then
echo "[$name] Boot partition {{ bigboot_boot_device_name }} successfully increased by {{ bigboot_size }} ("$((end-start))" seconds) " >/dev/kmsg
else
echo "[$name] Failed to extend boot partition: $ret ("$((end-start))" seconds)" >/dev/kmsg
fi
}
# NOTE(review): passes the script name as $1; main ignores its arguments, so
# this is harmless but "$@" would match the sibling scripts — confirm.
main "$0"

View File

@ -0,0 +1,59 @@
# initramfs
The `initramfs` role is included by the `shrink_lv` and `bigboot` roles to run an atomic flow of building and using a temporary initramfs in a reboot and restoring the original one.
The role is designed to be internal for this collection and support the automation of RHEL in-place upgrades, but can also be used for other purposes.
## Contents
To allow fast fail, the role provides a [`preflight.yml`](./tasks/preflight.yml) tasks file to be used at the start of the playbook.
Please note that the [`main`](./tasks/main.yml) task file will not run the preflight checks
## Role Variables
All variables are optional
### `initramfs_add_modules`
`initramfs_add_modules` is a space-separated list of dracut modules to be added to the default set of modules.
See [`dracut`](https://man7.org/linux/man-pages/man8/dracut.8.html) `-a` parameter for details.
### `initramfs_backup_extension`
`initramfs_backup_extension` is the file extension for the backup initramfs file.
Defaults to `old`
### `initramfs_post_reboot_delay`
`initramfs_post_reboot_delay` sets the number of seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
The value is used for [`post_reboot_delay`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/reboot_module.html#parameter-post_reboot_delay) parameter
Defaults to `30`
### `initramfs_reboot_timeout`
`initramfs_reboot_timeout` sets the maximum seconds to wait for machine to reboot and respond to a test command.
The value is used for [`reboot_timeout`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/reboot_module.html#parameter-reboot_timeout) parameter
Defaults to `7200`
## Example of a playbook to run the role
The following yaml is an example of a playbook that runs the role against a group of hosts named `rhel` and increasing the size of its boot partition by 1G.
The boot partition is automatically retrieved by the role by identifying the existing mounted partition to `/boot` and passing the information to the script using the `kernel_opts`.
```yaml
- name: Extend boot partition playbook
hosts: all
tasks:
- name: Validate initramfs preflight
ansible.builtin.include_role:
name: initramfs
tasks_from: preflight
- name: Create the initramfs and reboot to run the module
vars:
initramfs_add_modules: "my_extra_module"
ansible.builtin.include_role:
name: initramfs
```

View File

@ -0,0 +1,4 @@
initramfs_backup_extension: old
initramfs_add_modules: ""
initramfs_post_reboot_delay: 30
initramfs_reboot_timeout: 7200

View File

@ -0,0 +1,40 @@
---
# Build a temporary initramfs with optional extra dracut modules, reboot into
# it, then restore the original initramfs once the host is back up.
- name: Make sure the required related facts are available
  ansible.builtin.setup:
    gather_subset:
      - "!all"
      - "!min"
      - kernel
- name: Get kernel version
  ansible.builtin.set_fact:
    initramfs_kernel_version: "{{ ansible_facts.kernel }}"
- name: Create a backup of the current initramfs
  ansible.builtin.copy:
    remote_src: true
    src: /boot/initramfs-{{ initramfs_kernel_version }}.img
    dest: /boot/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }}
    mode: "0600"
- name: Create a new initramfs with the optional additional modules
  # Only pass -a when modules were requested; the original always appended a
  # (possibly empty) quoted argument, which dracut would receive as a bogus
  # extra argument when initramfs_add_modules was empty.
  # yamllint disable-line rule:line-length
  ansible.builtin.command: '/usr/bin/dracut {% if (initramfs_add_modules | length) > 0 %}-a "{{ initramfs_add_modules }}" {% endif %}--kver {{ initramfs_kernel_version }} --force'
  changed_when: true
- name: Reboot the server
  ansible.builtin.reboot:
    post_reboot_delay: "{{ initramfs_post_reboot_delay }}"
    reboot_timeout: "{{ initramfs_reboot_timeout }}"
- name: Restore previous initramfs
  ansible.builtin.copy:
    remote_src: true
    src: /boot/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }}
    dest: /boot/initramfs-{{ initramfs_kernel_version }}.img
    mode: "0600"
- name: Remove initramfs backup file
  ansible.builtin.file:
    path: /boot/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }}
    state: absent

View File

@ -0,0 +1,27 @@
---
# Preflight: verify the running kernel is also the grub default kernel, so the
# rebuilt initramfs is the one actually used on the next boot.
- name: Make sure the required related facts are available
  ansible.builtin.setup:
    gather_subset:
      - "!all"
      - "!min"
      - kernel
- name: Get kernel version
  ansible.builtin.set_fact:
    initramfs_kernel_version: "{{ ansible_facts.kernel }}"
- name: Get default kernel
  ansible.builtin.command:
    cmd: /sbin/grubby --default-kernel
  register: initramfs_grubby_rc
  changed_when: false
- name: Parse default kernel version
  # Strips the leading path components and the "vmlinuz-" prefix from e.g.
  # /boot/vmlinuz-4.18.0-513.el8.x86_64 to obtain the bare version string.
  # NOTE(review): assumes the default kernel path has exactly two leading path
  # components (/boot/...) — confirm for hosts with a separate /boot layout.
  ansible.builtin.set_fact:
    initramfs_default_kernel: "{{ ((((initramfs_grubby_rc.stdout_lines[0] | split('/'))[2] | split('-'))[1:]) | join('-')) | trim }}"
- name: Check the values
  ansible.builtin.assert:
    that: initramfs_default_kernel == initramfs_kernel_version
    fail_msg: "Current kernel version '{{ initramfs_kernel_version }}' is not the default version '{{ initramfs_default_kernel }}'"
    success_msg: "Current kernel version {{ initramfs_kernel_version }} and default version {{ initramfs_default_kernel }} match"

View File

@ -0,0 +1,55 @@
# shrink_lv
The `shrink_lv` role is used to decrease the size of logical volumes and the file system within them.
The role is designed to support the automation of RHEL in-place upgrades, but can also be used for other purposes.
## Contents
The role contains the shell scripts to shrink the logical volume and file system, as well as the script wrapping it to run as part of the pre-mount step during the boot process.
## Role Variables
### `shrink_lv_devices`
The variable `shrink_lv_devices` is the list of logical volumes to shrink and the target size for those volumes.
#### `device`
The device that is mounted as listed under `/proc/mount`.
If the same device has multiple paths, e.g. `/dev/vg/lv` and `/dev/mapper/vg/lv` pass the path that is mounted
#### `size`
The target size of the logical volume and filesystem after the role has completed.
The value can be either in bytes or with optional single letter suffix (1024 bases).
See `Unit options` type `iec` of [`numfmt`](https://man7.org/linux/man-pages/man1/numfmt.1.html)
## Example of a playbook to run the role
The following yaml is an example of a playbook that runs the role against all hosts to shrink the logical volume `lv` in volume group `vg` to 4G.
```yaml
- name: Shrink Logical Volumes playbook
hosts: all
vars:
shrink_lv_devices:
- device: /dev/vg/lv
size: 4G
roles:
- shrink_lv
```
# Validate execution
The script will add an entry to the kernel messages (`/dev/kmsg` or `/var/log/messages`) with success or failure.
In case of failure, it may also include an error message retrieved from the execution of the script.
A successful execution will look similar to this:
```bash
[root@localhost ~]# cat /var/log/messages |grep Resizing -A 2 -B 2
Oct 16 17:55:00 localhost /dev/mapper/rhel-root: 29715/2686976 files (0.2% non-contiguous), 534773/10743808 blocks
Oct 16 17:55:00 localhost dracut-pre-mount: resize2fs 1.42.9 (28-Dec-2013)
Oct 16 17:55:00 localhost journal: Resizing the filesystem on /dev/mapper/rhel-root to 9699328 (4k) blocks.#012The filesystem on /dev/mapper/rhel-root is now 9699328 blocks long.
Oct 16 17:55:00 localhost journal: Size of logical volume rhel/root changed from 40.98 GiB (10492 extents) to 37.00 GiB (9472 extents).
Oct 16 17:55:00 localhost journal: Logical volume rhel/root successfully resized.
```

View File

@ -0,0 +1 @@
shrink_lv_backup_extension: old

View File

@ -0,0 +1,14 @@
#!/bin/bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
# dracut module hook: always include this module when listed explicitly.
check(){
return 0
}
# Install the binaries and scripts the shrink_lv dracut module needs inside
# the initramfs image.
install() {
inst_multiple -o /usr/bin/numfmt /usr/bin/findmnt /usr/bin/lsblk /usr/sbin/lvm /usr/bin/awk /usr/bin/sed /usr/bin/mktemp /usr/bin/date /usr/bin/head /usr/sbin/blockdev /usr/sbin/tune2fs /usr/sbin/resize2fs /usr/bin/cut /usr/sbin/fsadm /usr/sbin/fsck.ext4
# shellcheck disable=SC2154
inst_hook pre-mount 99 "$moddir/shrink-start.sh"
inst_simple "$moddir/shrink.sh" "/usr/bin/shrink.sh"
}

View File

@ -0,0 +1,236 @@
#!/bin/bash
VOLUME_SIZE_ALIGNMENT=4096
# Resolve a device specification to a device name.
# $1: a plain device path (possibly followed by extra whitespace-separated
#     fields) or a "UUID=<uuid>" specification.
# Prints the resolved device name on stdout; returns non-zero on failure.
function get_device_name() {
    if [[ "$1" == "UUID="* ]]; then
        dev_name=$( parse_uuid "$1" )
    else
        # take the first whitespace-separated field
        dev_name=$(/usr/bin/cut -d " " -f 1 <<< "$1")
    fi
    status=$?
    # test "$status" explicitly; the original relied on arithmetic expansion
    # of the bare word "status" inside [[ ]]
    if [[ $status -ne 0 ]]; then
        return $status
    fi
    echo "$dev_name"
    return $status
}
# Convert an IEC size string (e.g. "2G") to bytes, rounded up to the next
# multiple of VOLUME_SIZE_ALIGNMENT. Values already aligned are still bumped
# up by one full alignment unit, matching the original arithmetic.
function ensure_size_in_bytes() {
    local bytes
    bytes=$(/usr/bin/numfmt --from iec "$1")
    bytes=$(( (bytes + VOLUME_SIZE_ALIGNMENT) / VOLUME_SIZE_ALIGNMENT * VOLUME_SIZE_ALIGNMENT ))
    echo $bytes
}
# Check whether a device is currently mounted (it must not be, to shrink it).
# Returns 0 when NOT mounted, 1 when mounted.
function is_device_mounted() {
    # findmnt exits 0 when the device has a mount entry. Discard all output:
    # the original "1>&2>/dev/null" only silenced stdout and left any findmnt
    # diagnostics on stderr.
    /usr/bin/findmnt --source "$1" >/dev/null 2>&1
    status=$?
    if [[ $status -eq 0 ]]; then
        echo "Device $1 is mounted" >&2
        return 1
    fi
    return 0
}
# Print the size in bytes of the given block device, as reported by lsblk.
# Propagates lsblk's exit status on failure (printing nothing).
function get_current_volume_size() {
    local size
    size=$(/usr/bin/lsblk -b "$1" -o SIZE --noheadings) || return $?
    echo "$size"
    return 0
}
# Verify that the given device is a logical volume.
# $1: resolved device name (passed to lsblk)
# $2: the device specification as provided by the user (for error messages)
# Returns 0 when the device type is "lvm", 1 otherwise.
function is_lvm(){
    val=$( /usr/bin/lsblk "$1" --noheadings -o TYPE 2>&1)
    status=$?
    # test "$status" explicitly; the original used the bare word "status"
    if [[ $status -ne 0 ]]; then
        echo "Failed to list block device properties for $2: $val" >&2
        return 1
    fi
    if [[ "$val" != "lvm" ]]; then
        # report the device we actually checked; the original referenced a
        # $device_name variable that is not set in this function's scope
        echo "Device $1 is not of lvm type" >&2
        return 1
    fi
    return 0
}
# Resolve a "UUID=<uuid>" specification to a device-mapper path.
# Prints "/dev/mapper/<lsblk NAME>" on stdout; returns lsblk's status when the
# UUID does not resolve.
# NOTE(review): assumes the UUID belongs to a device-mapper volume so that its
# lsblk NAME is valid under /dev/mapper — confirm for plain partitions.
function parse_uuid() {
uuid=$(/usr/bin/awk '{print $1}'<<< "$1"|/usr/bin/awk -F'UUID=' '{print $2}')
val=$(/usr/bin/lsblk /dev/disk/by-uuid/"$uuid" -o NAME --noheadings 2>/dev/null)
status=$?
if [[ $status -ne 0 ]]; then
echo "Failed to retrieve device name for UUID=$uuid" >&2
return $status
fi
echo "/dev/mapper/$val"
return 0
}
# Shrink logical volume $1 — and the file system inside it, via --resizefs —
# to exactly $2 bytes. Propagates lvreduce's exit status.
function shrink_volume() {
    local volume=$1
    local new_size_bytes=$2
    /usr/sbin/lvm lvreduce --resizefs -L "${new_size_bytes}b" "$volume"
}
# Validate that the current size of volume $1 is strictly greater than the
# requested size $2 (bytes) — i.e. that there is actually something to shrink.
# Returns 0 when a shrink is needed, 1 otherwise.
function check_volume_size() {
    current_size=$(get_current_volume_size "$1")
    if [[ $current_size -lt $2 ]];then
        echo "Current volume size for device $1 ($current_size bytes) is lower to expected $2 bytes" >&2
        return 1
    fi
    if [[ $current_size -eq $2 ]]; then
        echo "Current volume size for device $1 already equals $2 bytes" >&2
        return 1
    fi
    # explicit success: the original ended with "return $?", which only
    # happened to be 0 because a non-taken if-statement exits with status 0
    return 0
}
# Convert a size in bytes ($2) to a number of file system blocks on device $1,
# using the ext block size reported by tune2fs (integer division, rounds down).
function convert_size_to_fs_blocks(){
local device=$1
local size=$2
block_size_in_bytes=$(/usr/sbin/tune2fs -l "$device" | /usr/bin/awk '/Block size:/{print $3}')
echo $(( size / block_size_in_bytes ))
}
# Compute an expected post-resize file system size, in blocks, for device $1.
# NOTE(review): depends on INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES, which is
# never set in this script (it belongs to the bigboot script); when unset the
# arithmetic treats it as 0, so this returns the current total block count —
# confirm whether this helper was meant to take the size as a parameter.
function calculate_expected_resized_file_system_size_in_blocks(){
local device=$1
increment_boot_partition_in_blocks=$(convert_size_to_fs_blocks "$device" "$INCREMENT_BOOT_PARTITION_SIZE_IN_BYTES")
total_block_count=$(/usr/sbin/tune2fs -l "$device" | /usr/bin/awk '/Block count:/{print $3}')
new_fs_size_in_blocks=$(( total_block_count - increment_boot_partition_in_blocks ))
echo $new_fs_size_in_blocks
}
# Verify that the ext file system on device $1 can shrink to $2 (size in
# bytes). Uses "resize2fs -P" to estimate the minimum block count the file
# system can reach. Returns 0 when the shrink is feasible, 1 otherwise.
function check_filesystem_size() {
    local device=$1
    local new_size_in_bytes=$2
    local new_fs_size_in_blocks
    # Convert the requested byte size to fs blocks. The original discarded its
    # second argument and recomputed the target from a bigboot-only variable
    # that is never set in this script, so the requested size was ignored.
    new_fs_size_in_blocks=$(convert_size_to_fs_blocks "$device" "$new_size_in_bytes")
    # it is possible that running this command after resizing it might give an even smaller number.
    minimum_blocks_required=$(/usr/sbin/resize2fs -P "$device" 2> /dev/null | /usr/bin/awk '{print $NF}')
    if [[ "$new_fs_size_in_blocks" -le "0" ]]; then
        echo "Unable to shrink volume: New size is 0 blocks"
        return 1
    fi
    if [[ $minimum_blocks_required -gt $new_fs_size_in_blocks ]]; then
        echo "Unable to shrink volume: Estimated minimum size of the file system $1 ($minimum_blocks_required blocks) is greater than the new size $new_fs_size_in_blocks blocks" >&2
        return 1
    fi
    return 0
}
# Run the full validation and shrink pipeline for a single device entry.
# $1: resolved device name
# $2: requested size (IEC string, e.g. "2G")
# $3: original device specification as given by the user (for error messages)
# Returns non-zero when any validation step or the shrink itself fails.
function process_entry() {
# the device must be a logical volume
is_lvm "$1" "$3"
status=$?
if [[ $status -ne 0 ]]; then
return "$status"
fi
expected_size_in_bytes=$(ensure_size_in_bytes "$2")
# the file system must be able to fit into the requested size
check_filesystem_size "$1" "$expected_size_in_bytes"
status=$?
if [[ $status -ne 0 ]]; then
return "$status"
fi
# the volume must currently be larger than the requested size
check_volume_size "$1" "$expected_size_in_bytes"
status=$?
if [[ $status -ne 0 ]]; then
return "$status"
fi
# shrinking a mounted volume is unsafe
is_device_mounted "$1"
status=$?
if [[ $status -ne 0 ]]; then
return "$status"
fi
shrink_volume "$1" "$expected_size_in_bytes"
return $?
}
# Print usage information for the script to stdout.
function display_help() {
echo "Program to shrink ext4 file systems hosted in Logical Volumes.
Usage: '$(basename "$0")' [-h] [-d=|--device=]
Example:
where:
-h show this help text
-d|--device= name or UUID of the device that holds an ext4 and the new size separated by a ':'
for example /dev/my_group/my_vol:2G
Sizes will be rounded to be 4K size aligned"
}
# Parse command-line flags into the caller-scoped "entries" array.
# Accepts repeated -d=/--device= flags; -h prints help and exits 0; unknown
# flags print help and exit 1; an empty entry list prints help and exits 0.
function parse_flags() {
    local arg
    for arg in "$@"; do
        case $arg in
            -d=*|--device=*)
                entries+=("${arg#*=}")
                ;;
            -h)
                display_help
                exit 0
                ;;
            *)
                # unknown option
                echo "Unknown flag $arg"
                display_help
                exit 1
                ;;
        esac
    done
    if [[ ${#entries[@]} == 0 ]]; then
        display_help
        exit 0
    fi
}
# Split a "<device>:<size>" entry into the caller-scoped variables "device"
# and "expected_size". Prints help and returns 1 for malformed entries.
function parse_entry() {
    local -a strarr
    # Scope the IFS override to the read command; the original assigned IFS
    # globally and leaked the changed field separator to the rest of the run.
    IFS=':' read -ra strarr <<< "$1"
    if [[ ${#strarr[@]} != 2 ]]; then
        echo "Invalid device entry $1"
        display_help
        return 1
    fi
    device="${strarr[0]}"
    expected_size="${strarr[1]}"
}
# Entry point: parse the flags into device:size entries, then validate and
# shrink each one, continuing past failures. Exits with the last non-zero
# per-entry status, or 0 when every entry succeeded.
function main() {
local -a entries=()
local run_status=0
parse_flags "$@"
for entry in "${entries[@]}"
do
local device
local expected_size
# parse_entry populates $device and $expected_size
parse_entry "$entry"
status=$?
if [[ $status -ne 0 ]]; then
run_status=$status
continue
fi
# resolve UUID= specs and strip extra fields
device_name=$( get_device_name "$device" )
status=$?
if [[ $status -ne 0 ]]; then
run_status=$status
continue
fi
process_entry "$device_name" "$expected_size" "$device"
status=$?
if [[ $status -ne 0 ]]; then
run_status=$status
fi
done
exit $run_status
}
main "$@"

View File

@ -0,0 +1,20 @@
# Per-device validation for one shrink_lv_devices entry; "item" is supplied by
# the include_tasks loop in preflight.yaml.
- name: Get the mount point info
  ansible.builtin.set_fact:
    shrink_lv_mount_info: "{{ ansible_facts.mounts | selectattr('device', 'equalto', item.device) }}"
- name: Assert that the mount point exists
  ansible.builtin.assert:
    that: (shrink_lv_mount_info | length) == 1
    fail_msg: "Mount point {{ item.device }} does not exist"
- name: Assert that the filesystem is supported
  # only ext4 can be shrunk by the dracut module
  ansible.builtin.assert:
    that: shrink_lv_mount_info[0].fstype in ['ext4']
    fail_msg: "Unsupported filesystem '{{ shrink_lv_mount_info[0].fstype }}' on '{{ item.device }}'"
- name: Assert that the filesystem has enough free space
  # the requested target size must exceed the space currently in use
  ansible.builtin.assert:
    that: shrink_lv_mount_info[0].block_size * shrink_lv_mount_info[0].block_used < (item.size | ansible.builtin.human_to_bytes)
    fail_msg: >
      Requested size {{ item.size }} is smaller than currently used
      {{ (shrink_lv_mount_info[0].block_size * shrink_lv_mount_info[0].block_used) | ansible.builtin.human_readable }}

View File

@ -0,0 +1,54 @@
---
# Main flow for the shrink_lv role: validate, stage the dracut module, rebuild
# the initramfs and reboot to shrink the LVs, then verify the result.
- name: Make sure the required facts are available
  ansible.builtin.setup:
    gather_subset:
      - "!all"
      - "!min"
      - kernel
      - mounts
- name: Run preflight checks
  ansible.builtin.include_tasks: preflight.yaml
- name: Copy shrink LV dracut module
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: /usr/lib/dracut/modules.d/99shrink_lv/
    mode: "0554"
  loop:
    - module-setup.sh
    - shrink.sh
- name: Resolve and copy the shrink-start script
  # template resolution injects shrink_lv_devices into the pre-mount hook
  ansible.builtin.template:
    src: shrink-start.sh.j2
    dest: /usr/lib/dracut/modules.d/99shrink_lv/shrink-start.sh
    mode: '0554'
- name: Create the initramfs and reboot to run the module
  vars:
    initramfs_add_modules: "shrink_lv lvm"
  ansible.builtin.include_role:
    name: initramfs
- name: Remove dracut extend boot module
  ansible.builtin.file:
    path: /usr/lib/dracut/modules.d/99shrink_lv
    state: absent
- name: Retrieve mount points
  # re-gather mounts to observe the post-reboot sizes
  ansible.builtin.setup:
    gather_subset:
      - "!all"
      - "!min"
      - mounts
- name: Assert that the filesystem has shrunk
  ansible.builtin.assert:
    # yamllint disable-line rule:line-length
    that: (ansible_facts.mounts | selectattr('device', 'equalto', item.device) | map(attribute='size_total') | join | int) <= (item.size | ansible.builtin.human_to_bytes)
    fail_msg: >
      Logical Volume {{ item.device }} was not shrunk to {{ item.size }} as requested
    success_msg: >
      Logical Volume {{ item.device }} has been shrunk to {{ item.size }} as requested.
  loop: "{{ shrink_lv_devices }}"

View File

@ -0,0 +1,17 @@
---
# Preflight: validate the shrink_lv_devices input and each listed device
# before any destructive action is staged.
- name: Assert shrink_lv_devices
  ansible.builtin.assert:
    that:
      - shrink_lv_devices is defined
      - shrink_lv_devices | type_debug == "list"
      - shrink_lv_devices | length > 0
    fail_msg: shrink_lv_devices must be a list and include at least one element
- name: Validate initramfs preflight
  ansible.builtin.include_role:
    name: initramfs
    tasks_from: preflight
- name: Check all devices
  # runs check_device.yaml once per entry; each file refers to it as "item"
  ansible.builtin.include_tasks: check_device.yaml
  loop: "{{ shrink_lv_devices }}"

View File

@ -0,0 +1,27 @@
#!/bin/bash
# Rewrite /etc/lvm/lvm.conf inside the running initramfs so locking_type is 1.
# NOTE(review): despite the name, this sets file-based locking (locking_type=1)
# rather than disabling locking entirely — confirm intent.
disable_lvm_lock(){
    tmpfile=$(/usr/bin/mktemp)
    sed -e 's/\(^[[:space:]]*\)locking_type[[:space:]]*=[[:space:]]*[[:digit:]]/\1locking_type = 1/' /etc/lvm/lvm.conf >"$tmpfile"
    status=$?
    # test "$status" explicitly; the original used the bare word "status"
    if [[ $status -ne 0 ]]; then
        echo "Failed to disable lvm lock: $status" >/dev/kmsg
        exit 1
    fi
    # replace lvm.conf. There is no need to keep a backup since it's an ephemeral file, we are not replacing the original in the initramfs image file
    mv "$tmpfile" /etc/lvm/lvm.conf
}
# Activate every volume group so the logical volumes are visible to shrink.sh.
activate_volume_groups(){
    local vg
    for vg in $(/usr/sbin/lvm vgs -o name --noheading 2>/dev/null); do
        /usr/sbin/lvm vgchange -ay "$vg"
    done
}
main() {
    activate_volume_groups
    disable_lvm_lock
    # Send both stdout and stderr to the kernel log. The original's
    # "1>&2 >/dev/kmsg" only redirected stdout and left stderr on the console.
    /usr/bin/shrink.sh {% for device in shrink_lv_devices %}--device={{ device.device }}:{{ device.size }} {% endfor %} >/dev/kmsg 2>&1
}
main "$@"

View File

@ -0,0 +1,84 @@
# snapshot_create role
The `snapshot_create` role is used to control the creation for a defined set of LVM snapshot volumes.
In addition, it can optionally save the Grub configuration and image files under /boot and configure settings to enable the LVM snapshot autoextend capability.
The role will verify free space and should fail if there is not enough or if any snapshots already exist for the given `snapshot_create_set_name`.
The role is designed to support the automation of RHEL in-place upgrades, but can also be used to reduce the risk of more mundane system maintenance activities.
## Role Variables
### `snapshot_create_check_only`
When set to `true` the role will only verify there is enough free space for the specified snapshots and not create them.
Default `false`
### `snapshot_create_set_name`
The variable `snapshot_create_set_name` is used to identify the list of volumes to be operated upon.
The role will use the following naming convention when creating the snapshots:
`<Origin LV name>_<snapshot_create_set_name>`
### `snapshot_create_boot_backup`
Boolean to specify that the role should preserve the Grub configuration and image files under /boot required for booting the default kernel.
The files are preserved in a compressed tar archive at `/root/boot-backup-<snapshot_create_set_name>.tgz`. Default is `false`.
> **Warning**
>
> When automating RHEL in-place upgrades, do not perform a Grub to Grub2 migration as part of your upgrade playbook. It will invalidate your boot backup and cause a subsequent `revert` action to fail. For example, if you are using the [`upgrade`](https://github.com/redhat-cop/infra.leapp/tree/main/roles/upgrade#readme) role from the [`infra.leapp`](https://github.com/redhat-cop/infra.leapp) collection, do not set `update_grub_to_grub_2` to `true`. Grub to Grub2 migration should only be performed after the `remove` action has been performed to delete the snapshots and boot backup.
### `snapshot_create_snapshot_autoextend_threshold`
Configure the given `snapshot_create_autoextend_threshold` setting in lvm.conf before creating snapshots.
### `snapshot_create_snapshot_autoextend_percent`
Configure the given `snapshot_create_snapshot_autoextend_percent` setting in lvm.conf before creating snapshots.
### `snapshot_create_volumes`
This is the list of logical volumes for which snapshots are to be created and the size requirements for those snapshots. The volumes list is only required when the role is run with the check or create action.
### `vg`
The volume group of the origin logical volume for which a snapshot will be created.
### `lv`
The origin logical volume for which a snapshot will be created.
### `size`
The size of the logical volume according to the definition of the
[size](https://docs.ansible.com/ansible/latest/collections/community/general/lvol_module.html#parameter-size)
parameter of the `community.general.lvol` module.
To create thin provisioned snapshot of a thin provisioned volume, omit the `size` parameter or set it to `0`
## Example Playbooks
Perform a space check and fail if there will not be enough space for all the snapshots in the set.
If there is sufficient space, proceed to create snapshots for the listed logical volumes.
Each snapshot will be sized to 20% of the origin volume size.
Snapshot autoextend settings are configured to enable free space in the volume group to be allocated to any snapshot that may exceed 70% usage in the future.
Files under /boot will be preserved.
```yaml
- hosts: all
roles:
- name: snapshot_create
snapshot_create_set_name: ripu
snapshot_create_snapshot_autoextend_threshold: 70
snapshot_create_snapshot_autoextend_percent: 20
snapshot_create_boot_backup: true
snapshot_create_volumes:
- vg: rootvg
lv: root
size: 2G
- vg: rootvg
lv: var
size: 2G
```

View File

@ -0,0 +1,2 @@
snapshot_create_volumes: []
snapshot_create_boot_backup: false

View File

@ -0,0 +1,238 @@
'''
Check if there is enough space to create all the requested snapshots.
The input should be a JSON string array.
Each element should have the following keys:
- vg: Name of the volume group
- lv: Name of the Logical Volume
- size: The size of the requested snapshot.
Follows (https://docs.ansible.com/ansible/latest/collections/community/general/lvol_module.html#parameter-size)
without support for sign
'''
import argparse
import json
import math
import os
import subprocess
import sys
_VGS_COMMAND = '/usr/sbin/vgs'
_LVS_COMMAND = '/usr/sbin/lvs'
_EXIT_CODE_SUCCESS = 0
_EXIT_CODE_VOLUME_GROUP_SPACE = 1
_EXIT_CODE_FILE_SYSTEM_TYPE = 2
_EXIT_CODE_VOLUME_SPACE = 3
_supported_filesystems = [
'',
'ext2',
'ext3',
'ext4'
]
class CheckException(Exception):
    """Exception raised for snapshot space-check failures."""

# Command-line interface: the action to perform and the JSON volume list.
parser = argparse.ArgumentParser()
parser.add_argument('that', help='What should the script check', type=str, choices=['snapshots', 'resize'])
parser.add_argument('volumes', help='Volumes JSON array in a string', type=str)
def _main():
    """Parse the CLI arguments, normalize the requested sizes and run the
    selected check, exiting with 0 on success or an _EXIT_CODE_* value.

    For the 'snapshots' action, a successful check also prints the volume list
    with sizes normalized to bytes so the caller can feed it to lvol.
    """
    args = parser.parse_args()
    try:
        volumes = json.loads(args.volumes)
    except json.decoder.JSONDecodeError:
        # Report the actual volumes argument; the original printed
        # sys.argv[1], which is the action ("that"), not the volume list.
        print("Provided volume list '{volumes}' is not a valid json string".format(volumes=args.volumes))
        sys.exit(1)
    # one vgs query per distinct volume group
    groups_names = set(vol['vg'] for vol in volumes)
    groups_info = {
        group: _get_group_info(group) for group in groups_names
    }
    # accumulate each group's total requested size across its volumes
    for vol in volumes:
        vol['normalized_size'] = _calc_requested_size(groups_info[vol["vg"]], vol)
        groups_info[vol["vg"]]['requested_size'] += vol['normalized_size']
    if args.that == 'snapshots':
        exit_code = _check_free_size_for_snapshots(groups_info)
        if exit_code == _EXIT_CODE_SUCCESS:
            norm_vols = [
                {
                    'vg': vol['vg'],
                    'lv': vol['lv'],
                    'size': "{size}B".format(size=vol['normalized_size'])
                } for vol in volumes
            ]
            print(json.dumps(norm_vols))
    if args.that == 'resize':
        exit_code = _check_free_size_for_resize(volumes, groups_info)
    sys.exit(exit_code)
def _check_free_size_for_snapshots(groups_info):
    """Return an exit code indicating whether every VG's requested snapshot
    space fits within its free space."""
    return _check_requested_size(groups_info, 'free')
def _check_free_size_for_resize(volumes, groups_info):
    """Check that every volume can be resized to its requested size.

    Verifies, in order: the volume groups are large enough ('size' field), the
    filesystems are of a supported (resizable) type, and each requested size
    exceeds the space currently used on the filesystem.  On failure, prints a
    per-volume report and returns the matching _EXIT_CODE_* value.
    """
    exit_code = _check_requested_size(groups_info, 'size')
    if exit_code != _EXIT_CODE_SUCCESS:
        return exit_code
    mtab = _parse_mtab()
    for vol in volumes:
        device = "/dev/mapper/{vg}-{lv}".format(vg=vol['vg'], lv=vol['lv'])
        entry = mtab.get(device)
        if entry:
            vol['fs_type'] = entry['type']
            vol['fs_size'] = _calc_filesystem_size(entry)
        else:
            # Not mounted: no filesystem constraints apply.
            vol['fs_type'] = ''
            vol['fs_size'] = 0
    if any(vol['fs_type'] not in _supported_filesystems for vol in volumes):
        exit_code = _EXIT_CODE_FILE_SYSTEM_TYPE
    # The space check intentionally overrides the filesystem-type code, as in
    # the original cascading order.
    if any(vol['normalized_size'] <= vol['fs_size'] for vol in volumes):
        exit_code = _EXIT_CODE_VOLUME_SPACE
    if exit_code != _EXIT_CODE_SUCCESS:
        print(json.dumps(_to_printable_volumes(volumes)))
    return exit_code
def _check_requested_size(groups_info, group_field):
    """Check each group's accumulated 'requested_size' against the given field.

    group_field is 'free' (snapshots) or 'size' (resize).  On the first group
    that does not fit, print the full accounting and return the volume-group
    failure code; otherwise return success.
    """
    for group in groups_info.values():
        if group['requested_size'] > group[group_field]:
            # Dump the per-group numbers so the caller can report the shortage.
            print(json.dumps(groups_info))
            return _EXIT_CODE_VOLUME_GROUP_SPACE
    return _EXIT_CODE_SUCCESS
def _get_group_info(group):
    """Query `vgs` for a volume group and return its geometry in bytes.

    Returns a dict with 'name', 'size', 'free', 'extent_size' and a
    'requested_size' accumulator initialized to zero.
    """
    raw_report = subprocess.check_output([_VGS_COMMAND, group, '-v', '--units', 'b', '--reportformat', 'json'])
    vg_report = json.loads(raw_report)['report'][0]['vg'][0]
    info = {'name': group, 'requested_size': 0}
    for key, report_field in (('size', 'vg_size'), ('free', 'vg_free'), ('extent_size', 'vg_extent_size')):
        info[key] = _get_size_from_report(vg_report[report_field])
    return info
def _calc_requested_size(group_info, volume):
    """Translate a volume's requested 'size' into bytes, aligned up to the group's extent size.

    Supported forms (mirroring community.general.lvol, without sign support):
    - a bare number (int/float), interpreted as mebibytes (lvol's default unit)
    - '<number>%VG' / '<number>%FREE' / '<number>%ORIGIN'
    - '<number><unit>' where unit is one of b/k/m/g/t/p/e

    Raises CheckException for an unsupported percent base or an unparsable size.
    """
    unit = 'm'
    requested_size = volume.get('size', 0)
    # TODO: a missing/zero size means thin provisioning, which is not handled yet.
    if isinstance(requested_size, (int, float)):
        size = requested_size
    else:
        parts = requested_size.split('%')
        if len(parts) == 2:
            # Percentages are computed from byte values reported by vgs/lvs.
            unit = 'b'
            percent = float(parts[0])
            percent_of = parts[1]
            if percent_of == 'VG':
                size = group_info['size'] * percent / 100
            elif percent_of == 'FREE':
                size = group_info['free'] * percent / 100
            elif percent_of == 'ORIGIN':
                origin_size = _get_volume_size(volume)
                size = origin_size * percent / 100
            else:
                raise CheckException("Unsupported base type {base_type}".format(base_type=percent_of))
        else:
            try:
                size = float(requested_size[:-1])
                unit = requested_size[-1].lower()
            except ValueError:
                raise CheckException('Failed to read requested size {size}'.format(size=requested_size))
    return _align_to_extent(_convert_to_bytes(size, unit), group_info['extent_size'])
def _get_volume_size(vol):
    """Return the size in bytes of the logical volume vol['vg']/vol['lv'], as reported by `lvs`."""
    device = "{vg}/{lv}".format(vg=vol['vg'], lv=vol['lv'])
    raw_report = subprocess.check_output(
        [_LVS_COMMAND, device, '-v', '--units', 'b', '--reportformat', 'json']
    )
    lv_report = json.loads(raw_report)['report'][0]['lv'][0]
    return _get_size_from_report(lv_report['lv_size'])
def _get_size_from_report(reported_size):
    """Parse a size string from an lvm JSON report (e.g. '1234.00m', '<5.00g') into bytes.

    A value that parses as a plain number is assumed to be in mebibytes.
    """
    try:
        size = float(reported_size)
        unit = 'm'
    except ValueError:
        # lvm prefixes '<' when the value was rounded; drop it before parsing.
        text = reported_size[1:] if reported_size[0] == '<' else reported_size
        size = float(text[:-1])
        unit = text[-1].lower()
    return _convert_to_bytes(size, unit)
def _align_to_extent(size, extent_size):
return math.ceil(size / extent_size) * extent_size
def _calc_filesystem_size(mtab_entry):
    """Return the number of bytes currently used on the filesystem mounted at mtab_entry['mount_point']."""
    stats = os.statvfs(mtab_entry['mount_point'])
    used_blocks = stats.f_blocks - stats.f_bfree
    return used_blocks * stats.f_bsize
def _parse_mtab():
    """Read /etc/mtab and map each mounted device (fs_spec) to its mount point and filesystem type."""
    mounts = {}
    with open('/etc/mtab') as mtab_file:
        for line in mtab_file:
            # fstab-style fields: spec, file, vfstype, mntops, freq, passno
            fs_spec, fs_file, fs_vfstype, _fs_mntops, _fs_freq, _fs_passno = line.split()
            mounts[fs_spec] = {
                'mount_point': fs_file,
                'type': fs_vfstype
            }
    return mounts
def _convert_to_bytes(size, unit):
convertion_table = {
'b': 1024 ** 0,
'k': 1024 ** 1,
'm': 1024 ** 2,
'g': 1024 ** 3,
't': 1024 ** 4,
'p': 1024 ** 5,
'e': 1024 ** 6,
}
return size * convertion_table[unit]
def _convert_to_unit_size(bytes):
units = ['b', 'k', 'm', 'g', 't', 'p', 'e']
i = 0
while bytes >= 1024:
i += 1
bytes /= 1024
# Round down bytes to two digits
bytes = math.floor(bytes * 100) / 100
return "{size}{unit}".format(size=bytes, unit=units[i])
def _to_printable_volumes(volumes):
    """Build a failure report keyed by '<vg>_<lv>' with human-readable used and requested sizes."""
    printable = {}
    for volume in volumes:
        key = volume['vg'] + "_" + volume['lv']
        printable[key] = {
            'file_system_type': volume['fs_type'],
            'used': _convert_to_unit_size(volume['fs_size']),
            'requested_size': _convert_to_unit_size(volume['normalized_size'])
        }
    return printable
# Allow running the checker directly; the Ansible roles invoke it as a script.
if __name__ == '__main__':
    _main()

View File

@ -0,0 +1,26 @@
- name: Verify that all volumes exist
ansible.builtin.include_tasks: verify_volume_exists.yml
loop: "{{ snapshot_create_volumes }}"
- name: Verify that there are no existing snapshots
ansible.builtin.include_tasks: verify_no_existing_snapshot.yml
loop: "{{ snapshot_create_volumes }}"
- name: Verify that there is enough storage space
ansible.builtin.script: check.py snapshots '{{ snapshot_create_volumes | to_json }}'
args:
executable: "{{ ansible_python.executable }}"
register: snapshot_create_check_status
failed_when: false
changed_when: false
- name: Store check return in case of failure
ansible.builtin.set_fact:
snapshot_create_check_failure_json: "{{ snapshot_create_check_status.stdout | from_json }}"
when: snapshot_create_check_status.rc != 0
- name: Assert results
ansible.builtin.assert:
that: snapshot_create_check_status.rc == 0
fail_msg: Not enough space in the Volume Groups to create the requested snapshots
success_msg: The Volume Groups have enough space to create the requested snapshots

View File

@ -0,0 +1,68 @@
- name: Update lvm configuration
block:
- name: Stringify snapshot_autoextend_percent setting
ansible.builtin.set_fact:
snapshot_create_snapshot_autoextend_percent_config: "activation/snapshot_autoextend_percent={{ snapshot_create_snapshot_autoextend_percent }}"
when: snapshot_create_snapshot_autoextend_percent is defined
- name: Stringify snapshot_autoextend_threshold setting
ansible.builtin.set_fact:
snapshot_create_snapshot_autoextend_threshold_config: "activation/snapshot_autoextend_threshold={{ snapshot_create_snapshot_autoextend_threshold }}"
when: snapshot_create_snapshot_autoextend_threshold is defined
- name: Stringify the new config
ansible.builtin.set_fact:
snapshot_create_new_lvm_config: >
{{ snapshot_create_snapshot_autoextend_percent_config | default('') }}
{{ snapshot_create_snapshot_autoextend_threshold_config | default('') }}
- name: Set LVM configuration
ansible.builtin.command: 'lvmconfig --mergedconfig --config "{{ snapshot_create_new_lvm_config }}" --file /etc/lvm/lvm.conf'
changed_when: true
when: ((snapshot_create_new_lvm_config | trim) | length) > 0
- name: Check for grubenv saved_entry
ansible.builtin.lineinfile:
name: /boot/grub2/grubenv
regexp: ^saved_entry=
state: absent
check_mode: true
changed_when: false
failed_when: false
register: snapshot_create_grubenv
- name: Add grubenv saved_entry
ansible.builtin.shell: 'grubby --set-default-index=$(grubby --default-index)'
changed_when: true
when: snapshot_create_grubenv.found is defined and snapshot_create_grubenv.found == 0
- name: Create snapshots
community.general.lvol:
vg: "{{ item.vg }}"
lv: "{{ item.lv }}"
snapshot: "{{ item.lv }}_{{ snapshot_create_set_name }}"
size: "{{ item.size | default(omit) }}"
loop: "{{ snapshot_create_volumes }}"
- name: Required packages are present
ansible.builtin.package:
name:
- gzip
- tar
state: present
- name: Create boot backup
community.general.archive:
format: gz
mode: '0644'
dest: "/root/boot-backup-{{ snapshot_create_set_name }}.tgz"
path:
- "/boot/initramfs-{{ ansible_kernel }}.img"
- "/boot/vmlinuz-{{ ansible_kernel }}"
- "/boot/System.map-{{ ansible_kernel }}"
- "/boot/symvers-{{ ansible_kernel }}.gz"
- "/boot/config-{{ ansible_kernel }}"
- "/boot/.vmlinuz-{{ ansible_kernel }}.hmac"
- /boot/grub/grub.conf
- /boot/grub2/grub.cfg
- /boot/grub2/grubenv
- /boot/loader/entries
- /boot/efi/EFI/redhat/grub.cfg
when: snapshot_create_boot_backup

View File

@ -0,0 +1,8 @@
- name: Check available disk space
ansible.builtin.include_tasks: check.yml
- name: Create Snapshot
vars:
snapshot_create_volumes: "{{ snapshot_create_check_status.stdout | from_json }}"
ansible.builtin.include_tasks: create.yml
when: not (snapshot_create_check_only | default(false))

View File

@ -0,0 +1,21 @@
- name: Run lvs
ansible.builtin.command: >
lvs
--select 'vg_name = {{ item.vg }}
&& origin = {{ item.lv }}
&& lv_name = {{ item.lv }}_{{ snapshot_create_set_name }}'
--reportformat json
register: snapshot_create_lvs_response
changed_when: false
- name: Parse report
ansible.builtin.set_fact:
snapshot_create_lv_snapshot_report_array: "{{ (snapshot_create_lvs_response.stdout | from_json).report[0].lv }}"
- name: Verify that no snapshot exists for the volume
ansible.builtin.assert:
that: (snapshot_create_lv_snapshot_report_array | length) == 0
fail_msg: >
The volume '{{ item.lv }}' in volume group '{{ item.vg }}'
already has at least one snapshot
'{{ snapshot_create_lv_snapshot_report_array[0].lv_name | default('none') }}'

View File

@ -0,0 +1,9 @@
- name: Run lvs
ansible.builtin.command: "lvs --select 'vg_name = {{ item.vg }} && lv_name = {{ item.lv }}' --reportformat json"
register: snapshot_create_lvs_response
changed_when: false
- name: Verify that the volume was found
ansible.builtin.assert:
that: (((snapshot_create_lvs_response.stdout | from_json).report[0].lv) | length) > 0
fail_msg: "Could not find volume '{{ item.lv }}' in volume group '{{ item.vg }}'"

View File

@ -0,0 +1,32 @@
# snapshot_remove role
The `snapshot_remove` role is used to remove snapshots.
In addition, it removes the Grub configuration and image files under /boot if it was previously backed up
It is intended to be used along with the `snapshot_create` role.
The role is designed to support the automation of RHEL in-place upgrades, but can also be used to reduce the risk of more mundane system maintenance activities.
## Role Variables
### `snapshot_remove_set_name`
The variable `snapshot_remove_set_name` is used to identify the list of volumes to be operated upon.
The role will use the following naming convention when reverting the snapshots:
`<Origin LV name>_<snapshot_remove_set_name>`
This naming convention will be used to identify the snapshots to be removed.
## Example Playbooks
### Commit
A commit playbook is used when users are comfortable the snapshots are not needed any longer.
Each snapshot in the snapshot set is removed and the backed up image files from /boot are deleted.
```yaml
- hosts: all
roles:
- name: snapshot_remove
snapshot_remove_set_name: ripu
```

View File

@ -0,0 +1,23 @@
- name: Calculate the list of snapshots
block:
- name: Get list of volumes
ansible.builtin.command: "lvs --select 'lv_name =~ {{ snapshot_remove_set_name }}$ && origin != \"\"' --reportformat json "
register: snapshot_remove_lvs_response
changed_when: false
- name: Get LV dict List
ansible.builtin.set_fact:
snapshot_remove_snapshots: "{{ (snapshot_remove_lvs_response.stdout | from_json).report[0].lv }}"
- name: Remove snapshots
community.general.lvol:
state: absent
vg: "{{ item.vg_name }}"
lv: "{{ item.origin }}"
snapshot: "{{ item.lv_name }}"
force: true
loop: "{{ snapshot_remove_snapshots }}"
- name: Remove boot backup
ansible.builtin.file:
path: "/root/boot-backup-{{ snapshot_remove_set_name }}.tgz"
state: absent

View File

@ -0,0 +1,36 @@
# snapshot_revert role
The `snapshot_revert` role is used to merge snapshots to origin and reboot (i.e., rollback).
The role will verify that all snapshots in the set are still in active state before doing any merges.
This is to prevent rolling back if any snapshots have become invalidated in which case the role should fail.
In addition, it restores the Grub configuration and image files under /boot if they were previously backed up.
It is intended to be used along with the `snapshot_create` role.
The role is designed to support the automation of RHEL in-place upgrades, but can also be used to reduce the risk of more mundane system maintenance activities.
## Role Variables
### `snapshot_revert_set_name`
The variable `snapshot_revert_set_name` is used to identify the list of volumes to be operated upon.
The role will use the following naming convention when reverting the snapshots:
`<Origin LV name>_<snapshot_revert_set_name>`
This naming convention will be used to identify the snapshots to be merged.
The `revert` action will verify that all snapshots in the set are still active state before doing any merges. This is to prevent rolling back if any snapshots have become invalidated in which case the `revert` action should fail.
## Example Playbooks
This playbook rolls back the host using the snapshots created using the `snapshot_create` role.
After verifying that all snapshots are still valid, each logical volume in the snapshot set is merged.
The image files under /boot will be restored and then the host will be rebooted.
```yaml
- hosts: all
roles:
- name: snapshot_revert
snapshot_revert_set_name: ripu
```

View File

@ -0,0 +1,73 @@
- name: Calculate the list of snapshots
block:
- name: Get list of volumes
ansible.builtin.command: "lvs --select 'lv_name =~ {{ snapshot_revert_set_name }}$ && origin != \"\"' --reportformat json "
register: snapshot_revert_lvs_response
changed_when: false
- name: Get LV dict List
ansible.builtin.set_fact:
snapshot_revert_snapshots: "{{ (snapshot_revert_lvs_response.stdout | from_json).report[0].lv }}"
- name: Verify that all snapshots are active
ansible.builtin.include_tasks: verify_snapshot_active.yml
loop: "{{ snapshot_revert_snapshots }}"
- name: Required packages are present
ansible.builtin.package:
name:
- gzip
- tar
state: present
- name: Check if Boot backup exists
ansible.builtin.stat:
path: "/root/boot-backup-{{ snapshot_revert_set_name }}.tgz"
register: snapshot_revert_boot_archive_stat
- name: Restore boot backup
ansible.builtin.unarchive:
remote_src: true
src: "{{ snapshot_revert_boot_archive_stat.stat.path }}"
dest: /boot
when: snapshot_revert_boot_archive_stat.stat.exists
- name: Revert to snapshots
ansible.builtin.command: "lvconvert --merge /dev/{{ item.vg_name }}/{{ item.lv_name }}"
loop: "{{ snapshot_revert_snapshots }}"
changed_when: false
- name: Reboot
ansible.builtin.reboot:
- name: Check if /boot is on LVM
ansible.builtin.command: "grub2-probe --target=abstraction /boot"
changed_when: false
failed_when: false
register: snapshot_revert_boot_abstraction
- name: Reinstall Grub to boot device
when: snapshot_revert_boot_abstraction.stdout == 'lvm'
block:
- name: Get boot device
ansible.builtin.shell: "lsblk -spnlo name $(grub2-probe --target=device /boot)"
changed_when: false
register: snapshot_revert_boot_dev_deps
- name: Run grub2-install
ansible.builtin.command: "grub2-install {{ snapshot_revert_boot_dev_deps.stdout_lines | last }}"
changed_when: true
- name: Remove boot backup
ansible.builtin.file:
path: "{{ snapshot_revert_boot_archive_stat.stat.path }}"
state: absent
when: snapshot_revert_boot_archive_stat.stat.exists
- name: Wait for the snapshot to drain
ansible.builtin.command: "lvs --select 'vg_name = {{ item.vg_name }} && lv_name = {{ item.origin }}' --reportformat json"
register: snapshot_revert_lv_drain_check
until: (snapshot_revert_lv_drain_check.stdout | from_json).report[0].lv[0].data_percent == ""
retries: 20
delay: 30
loop: "{{ snapshot_revert_snapshots }}"
changed_when: false

View File

@ -0,0 +1,14 @@
- name: Run lvs
ansible.builtin.command: "lvs --select 'lv_name = {{ item.lv_name }}' --reportformat json"
register: snapshot_revert_lvs_response
changed_when: false
- name: Parse report
ansible.builtin.set_fact:
snapshot_revert_lv_attr: "{{ (snapshot_revert_lvs_response.stdout | from_json).report[0].lv[0].lv_attr }}"
- name: Verify that the snapshot is active
ansible.builtin.assert:
that:
- snapshot_revert_lv_attr[0] == 's'
- snapshot_revert_lv_attr[4] == 'a'

View File

@ -0,0 +1,39 @@
# Testing the LVM Snapshot Role
## Prerequisites
- All the tests are in the form of ansible playbooks.
- All playbooks expect that the target machine will have a secondary storage device to be used for testing.
## Variables
The variables may be passed as part of the inventory or using a separate file.
```yaml
device: < device node without `/dev`. e.g. vdb >
```
## Ansible Configuration
In order to run the tests from the repo without having to install them,
the tests directory includes an [ansible.cfg](./ansible.cfg) file.
Make sure to point to it when running the test playbook
## Running a test
### Inventory file
In this example, the `device` parameter is passed in the `inventory.yml` file
```yaml
all:
hosts:
<FQDN of test machine>:
device: vdb
```
### Command line
Running the [snapshot revert playbook](./test-revert-playbook.yml) test from the repo
```bash
ANSIBLE_CONFIG=./tests/ansible.cfg ansible-playbook -K -i inventory.yml tests/test-revert-playbook.yml
```

View File

@ -0,0 +1,2 @@
[defaults]
roles_path=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:../roles

View File

@ -0,0 +1,15 @@
- name: Generate snapshot list out of volumes list
ansible.builtin.set_fact:
_snapshots: "{{ (_snapshots | default([])) + [{'vg': volume_group, 'lv': item.name, 'size': item.size}] }}"
loop: "{{ volumes }}"
- name: Create the snapshot
vars:
snapshot_create_volumes: "{{ _snapshots }}"
snapshot_create_set_name: "{{ snapshot_set_name }}"
ansible.builtin.include_role:
name: snapshot_create
- name: Verify that the snapshot was created
ansible.builtin.include_tasks: verify-snapshot-created.yml
loop: "{{ volumes }}"

View File

@ -0,0 +1,40 @@
- name: Fill the volume
block:
- name: Set the retry count
ansible.builtin.set_fact:
_retry_count: "{{ (_retry_count | default('-1') | int) + 1 }}"
- name: Generate the Sub-Directory name
ansible.builtin.set_fact:
_sub_dir_name: "{{ lookup('community.general.random_string', upper=false, numbers=false, special=false) }}"
- name: Make a copy of the boot partition
ansible.builtin.copy:
src: /boot
dest: "{{ test_directory }}/{{ _sub_dir_name }}"
remote_src: true
mode: '0777'
- name: Get the status of the snapshot
ansible.builtin.command: "lvs --select 'lv_name = {{ volume_name }}_{{ snapshot_set_name }}' --reportformat json"
register: _lv_status_check
changed_when: false
- name: Store the snapshot data_percent
ansible.builtin.set_fact:
_snapshot_data_percent: "{{ ((_lv_status_check.stdout | from_json).report[0].lv[0].data_percent) }}"
- name: Check if snapshot is full enough
ansible.builtin.assert:
that: _snapshot_data_percent|float > snapshot_fill_percent|float
quiet: true
rescue:
- name: Check the retry count to avoid endless loop
ansible.builtin.assert:
that: (_retry_count|int) < (snapshot_max_retry|int)
fail_msg: "Ended after {{ snapshot_max_retry }} retries"
success_msg: "Volume is not full enough ({{ _snapshot_data_percent }}) - Run again..."
- name: Include the same tasks file again
ansible.builtin.include_tasks: fill-snapshot.yml

View File

@ -0,0 +1,11 @@
- name: Unmount the "{{ item.directory }}"
ansible.posix.mount:
path: "{{ item.directory }}"
state: absent
- name: Remove the logical volume
community.general.lvol:
vg: "{{ volume_group }}"
lv: "{{ item.name }}"
force: true
state: absent

View File

@ -0,0 +1,25 @@
- name: Cleanup the volumes
ansible.builtin.include_tasks: post-test-clean-volume.yml
loop: "{{ volumes }}"
- name: Remove the volume group
community.general.lvg:
vg: "{{ volume_group }}"
pvs: "/dev/{{ device }}1"
state: absent
- name: Remove the PV
ansible.builtin.command: "pvremove /dev/{{ device }}1"
changed_when: true
- name: Cleanup the system.devices file
ansible.builtin.lineinfile:
path: /etc/lvm/devices/system.devices
regexp: "IDTYPE=devname IDNAME=/dev/{{ device }}1 DEVNAME=/dev/{{ device }}1 PVID=. PART=1"
state: absent
- name: Delete the partition
community.general.parted:
device: "/dev/{{ device }}"
number: 1
state: absent

View File

@ -0,0 +1,18 @@
- name: Create the logical volume
community.general.lvol:
vg: "{{ volume_group }}"
lv: "{{ item.name }}"
size: "{{ item.size }}"
force: true
- name: Format the ext4 filesystem
community.general.filesystem:
fstype: ext4
dev: "/dev/{{ volume_group }}/{{ item.name }}"
- name: Mount the lv on "{{ item.directory }}"
ansible.posix.mount:
path: "{{ item.directory }}"
src: "/dev/{{ volume_group }}/{{ item.name }}"
fstype: ext4
state: mounted

View File

@ -0,0 +1,23 @@
- name: Create partition
community.general.parted:
device: "/dev/{{ device }}"
number: 1
part_end: 9GiB
flags:
- lvm
state: present
- name: Install lvm2 dependency
ansible.builtin.package:
name: lvm2
state: present
- name: Create the volume group
community.general.lvg:
vg: "{{ volume_group }}"
pvs: "/dev/{{ device }}1"
pesize: 16
- name: Create and prepare the volumes
ansible.builtin.include_tasks: pre-test-prepare-volume.yml
loop: "{{ volumes }}"

View File

@ -0,0 +1,62 @@
- name: Test creating snapshots as percentage of free
hosts: all
become: true
vars:
volume_group: test_vg
base_volume_name: test_lv
volume_size: 3g
base_test_directory: "/mnt/test"
snapshot_set_name: demo_snap
tasks:
- name: Run pre-test steps
vars:
volumes:
- name: "{{ base_volume_name }}-1"
size: "{{ volume_size }}"
directory: "{{ base_test_directory }}-1"
- name: "{{ base_volume_name }}-2"
size: "{{ volume_size }}"
directory: "{{ base_test_directory }}-2"
ansible.builtin.include_tasks: pre-test-tasks.yml
- name: Create the snapshot
vars:
volumes:
- name: "{{ base_volume_name }}-1"
size: 45%FREE
- name: "{{ base_volume_name }}-2"
size: 45%FREE
ansible.builtin.include_tasks: create-snapshot.yml
- name: Verify the snapshot sizes and cleanup
block:
- name: Get the size of the created snapshot
ansible.builtin.command: lvs --select 'vg_name = {{ volume_group }} && origin = {{ item }}' --units b --reportformat json
register: lvs_response
changed_when: false
loop:
- "{{ base_volume_name }}-1"
- "{{ base_volume_name }}-2"
- name: Parse report
ansible.builtin.set_fact:
lvs_sizes: "{{ (lvs_sizes | default([])) + [(item.stdout | from_json).report[0].lv[0].lv_size] }}"
loop: "{{ lvs_response.results }}"
- name: Assert that both snapshots have the same size
ansible.builtin.assert:
that: lvs_sizes[0] == lvs_sizes[1]
fail_msg: "The snapshots were not created with the same size: {{ lvs_sizes[0] }} != {{ lvs_sizes[1] }}"
success_msg: "Both snapshots were created with the same size: {{ lvs_sizes[0] }}"
always:
- name: Remove Snapshot
vars:
snapshot_remove_set_name: "{{ snapshot_set_name }}"
ansible.builtin.include_role:
name: snapshot_remove
- name: Cleanup
vars:
volumes:
- name: "{{ base_volume_name }}-1"
directory: "{{ base_test_directory }}-1"
- name: "{{ base_volume_name }}-2"
directory: "{{ base_test_directory }}-2"
ansible.builtin.include_tasks: post-test-tasks.yml

View File

@ -0,0 +1,52 @@
- name: Test reverting to the snapshots
hosts: all
become: true
vars:
volume_group: test_vg
test_directory: "/mnt/test"
volume_name: test_lv
volumes:
- name: "{{ volume_name }}"
size: 4g
directory: "{{ test_directory }}"
snapshot_set_name: demo_snap
snapshot_fill_percent: 60
snapshot_max_retry: 100
tasks:
- name: Run pre-test steps
ansible.builtin.include_tasks: pre-test-tasks.yml
- name: Create the snapshot
ansible.builtin.include_tasks: create-snapshot.yml
- name: Fill the snapshot
ansible.builtin.include_tasks: fill-snapshot.yml
- name: Revert to Snapshot
vars:
snapshot_revert_set_name: "{{ snapshot_set_name }}"
ansible.builtin.include_role:
name: snapshot_revert
- name: Verify that the snapshot was completely drained
block:
- name: Verify that the snapshot no longer exists
vars:
volume_name: test_lv
ansible.builtin.include_tasks: verify-snapshot-not-exist.yml
- name: Verify that the snapshot was drained before returning
block:
- name: Get the status of the volume
ansible.builtin.command: "lvs --select 'lv_name = {{ volume_name }}' --reportformat json"
register: _lv_status_check
changed_when: false
- name: Store the snapshot data_percent
ansible.builtin.set_fact:
volume_data_percent: "{{ ((_lv_status_check.stdout | from_json).report[0].lv[0].data_percent) }}"
- name: Assert volume_data_percent is 0
ansible.builtin.assert:
that: volume_data_percent|float == 0.0
fail_msg: "Volume data percent is {{ volume_data_percent }} while it should be 0"
always:
- name: Cleanup
ansible.builtin.include_tasks: post-test-tasks.yml

View File

@ -0,0 +1,32 @@
- name: Test removing snapshots after creating them
hosts: all
become: true
vars:
volume_group: test_vg
volumes:
- name: test_lv
size: 1g
directory: /mnt/test
snapshot_set_name: demo_snap
snapshot_create_snapshot_autoextend_threshold: 80
snapshot_create_snapshot_autoextend_percent: 15
tasks:
- name: Run pre-test steps
ansible.builtin.include_tasks: pre-test-tasks.yml
- name: Create the snapshot
ansible.builtin.include_tasks: create-snapshot.yml
- name: Remove Snapshot
vars:
snapshot_remove_set_name: "{{ snapshot_set_name }}"
ansible.builtin.include_role:
name: snapshot_remove
- name: Verify that the snapshot no longer exist
vars:
volume_name: test_lv
ansible.builtin.include_tasks: verify-snapshot-not-exist.yml
- name: Cleanup
ansible.builtin.include_tasks: post-test-tasks.yml

View File

@ -0,0 +1,63 @@
- name: Test reverting to the snapshots
hosts: all
become: true
vars:
volume_group: test_vg
test_directory: "/mnt/test"
volumes:
- name: test_lv
size: 1g
directory: "{{ test_directory }}"
test_file: "{{ test_directory }}/foo.txt"
snapshot_set_name: demo_snap
tasks:
- name: Run pre-test steps
ansible.builtin.include_tasks: pre-test-tasks.yml
- name: Create the snapshot
ansible.builtin.include_tasks: create-snapshot.yml
- name: Create test file
block:
- name: Verify that the file does not exist
block:
- name: Run ls
ansible.builtin.command: "ls {{ test_file }}"
register: ls_response
changed_when: false
ignore_errors: true
failed_when: ls_response.rc == 0
- name: Create the file using touch
ansible.builtin.file:
path: "{{ test_file }}"
state: touch
mode: u=rw,g=r,o=r
- name: Verify that the file exists
block:
- name: Run ls
ansible.builtin.command: "ls {{ test_file }}"
register: ls_response
changed_when: false
- name: Revert to Snapshot
vars:
snapshot_revert_set_name: "{{ snapshot_set_name }}"
ansible.builtin.include_role:
name: snapshot_revert
- name: Verify that the file no longer exist
block:
- name: Run ls
ansible.builtin.command: "ls {{ test_file }}"
register: ls_response
changed_when: false
ignore_errors: true
failed_when: ls_response.rc == 0
- name: Verify that the snapshot no longer exists
vars:
volume_name: test_lv
ansible.builtin.include_tasks: verify-snapshot-not-exist.yml
- name: Cleanup
ansible.builtin.include_tasks: post-test-tasks.yml

View File

@ -0,0 +1,39 @@
- name: Test trying to create too big a snapshot
hosts: all
become: true
vars:
volume_group: test_vg
volumes:
- name: test_lv
size: 8g
directory: /mnt/test
snapshot_set_name: demo_snap
snapshot_create_snapshot_autoextend_threshold: 80
snapshot_create_snapshot_autoextend_percent: 15
tasks:
- name: Run pre-test steps
ansible.builtin.include_tasks: pre-test-tasks.yml
- name: Create the snapshot and handle the failure
block:
- name: Create the snapshot
ansible.builtin.include_tasks: create-snapshot.yml
always:
- name: Verify that the snapshot does not exist
vars:
volume_name: test_lv
ansible.builtin.include_tasks: verify-snapshot-not-exist.yml
- name: Cleanup
ansible.builtin.include_tasks: post-test-tasks.yml
- name: Print the failure JSON if exists
ansible.builtin.debug:
var: snapshot_create_check_failure_json
when: snapshot_create_check_failure_json is defined
- name: Check results
ansible.builtin.assert:
that:
- snapshot_create_check_failure_json is defined
- snapshot_create_check_failure_json.test_vg
- snapshot_create_check_failure_json.test_vg.size == 9646899200
- snapshot_create_check_failure_json.test_vg.free == 1056964608
- snapshot_create_check_failure_json.test_vg.requested_size == 8589934592

View File

@ -0,0 +1,24 @@
- name: Run lvs
ansible.builtin.command: lvs --select 'vg_name = {{ volume_group }} && origin = {{ item.name }}' --reportformat json
register: lvs_response
changed_when: false
- name: Parse report
ansible.builtin.set_fact:
lv_snapshot_array: "{{ (lvs_response.stdout | from_json).report[0].lv }}"
- name: Verify that the snapshot exists
ansible.builtin.assert:
that: (lv_snapshot_array | length) == 1
fail_msg: >
The snapshot for {{ item.name }} was not created
- name: Get the snapshot name
ansible.builtin.set_fact:
snapshot_name: "{{ lv_snapshot_array[0].lv_name | default('n/a') }}"
- name: Verify that the snapshot was named correctly
ansible.builtin.assert:
that: snapshot_name == item.name + '_' + snapshot_set_name
fail_msg: >
Snapshot name '{{ snapshot_name }}' is not as expected {{ item.name }}_{{ snapshot_set_name }}

View File

@ -0,0 +1,16 @@
- name: Run lvs
ansible.builtin.command: lvs --select 'vg_name = {{ volume_group }} && lv_name = {{ volume_name }}_{{ snapshot_set_name }}' --reportformat json
register: lvs_response
changed_when: false
- name: Parse report
ansible.builtin.set_fact:
lv_snapshot_report_array: "{{ (lvs_response.stdout | from_json).report[0].lv }}"
- name: Verify that the snapshot no longer exists
ansible.builtin.assert:
that: (lv_snapshot_report_array | length) == 0
fail_msg: >
    The snapshot '{{ volume_name }}_{{ snapshot_set_name }}' for
volume '{{ volume_name }}' in volume group '{{ volume_group }}'
still exists