Compare commits

5 Commits

Author SHA1 Message Date
4bea43d589 Sync with Develop (#7)
Reviewed-on: #7
Co-authored-by: Chris Hammer <chris@thezengarden.net>
Co-committed-by: Chris Hammer <chris@thezengarden.net>
2025-01-08 16:44:31 -05:00
4d10a958d2 Merge pull request 'named-chroot' (#5) from develop into main
Reviewed-on: #5
2024-08-02 23:42:37 -04:00
05ad0d4c36 Merge pull request 'Sync with develop' (#4) from develop into main
Reviewed-on: #4
2024-08-02 10:12:57 -04:00
8beb34f553 Merge pull request 'Develop Sync' (#3) from develop into main
Reviewed-on: #3
2024-07-22 15:50:22 -04:00
d6de38bfbc Merge pull request 'Move develop into main' (#2) from develop into main
Reviewed-on: #2
2024-07-11 21:41:44 -04:00
52 changed files with 824 additions and 642 deletions

View File

@ -57,13 +57,3 @@
name: infra.lvm_snapshots.bigboot
vars:
bigboot_partition_size: "{{ bigboot_size_target }}"
- name: Failure on request
ansible.builtin.fail:
msg: "Ansible job has been failed upon request."
when:
- bigboot_fail_request | default(false) | bool
rescue:
- name: Cleanup from any previous executions
ansible.builtin.import_tasks: tasks/cleanup.yml

View File

@ -1,42 +1,6 @@
---
- name: ReaR Backup Playbook
hosts: all
become: true
gather_facts: true
strategy: free
vars_files:
- bigboot_vars.yml
tasks:
- name: Import rear_vars role
ansible.builtin.import_role:
name: rhc.rear.rear_vars
- name: Debug rear_vars_nfs_share
ansible.builtin.debug:
var: rear_vars_nfs_share
# - name: Fetch the template name from NFS exports
# ansible.builtin.set_fact:
# job_template_name: "{{ nfs_exports_template_name }}"
# - name: Check for previous backup completion
# ansible.builtin.stat:
# path: "{{ rear_backup_success_file_path }}/{{ rear_backup_workflow }}_rear_success"
# register: rear_backup_success_file
# - name: End if backup has already completed successfully
# ansible.builtin.meta: end_host
# when:
# - rear_backup_success_file['stat']['exists'] | bool
# - not rear_force_backup | default(false) | bool
# - name: Perform ReaR Backup
# when:
# - bigboot_data[inventory_hostname]['bigboot_execute_bigboot'] | default(false) | bool
# - not rear_backup_skip | default(false) | bool
# block:
# - name: Perform ReaR backup
# ansible.builtin.include_role:
# name: rhc.rear.rear_backup
- name: Perform a ReaR backup before the /boot expansion
ansible.builtin.import_playbook: rhc.rear.rear_backup
when:
- bigboot_data[inventory_hostname]['bigboot_execute_bigboot'] | default(false) | bool
- not rear_backup_skip | default(false) | bool

View File

@ -1,6 +0,0 @@
---
- name: Cleanup ReaR artifacts on successful Bigboot
ansible.builtin.import_playbook: rhc.rear.rear_remove
when:
- bigboot_data[inventory_hostname]['bigboot_execute_bigboot'] | default(false) | bool
- not rear_backup_skip | default(false) | bool

View File

@ -32,10 +32,9 @@
tasks:
- name: Grab the template name
ansible.builtin.set_stats:
aggregate: false
per_host: false
data:
nfs_exports_template_name: "{{ tower_job_template_name }}"
aggregate: true
- name: Create IP list and add to NFS exports
ansible.builtin.include_tasks: tasks/rear_nfs_exports.yml
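The hunk above changes how `ansible.builtin.set_stats` publishes `nfs_exports_template_name`. With `per_host: false`, the value becomes a playbook-level artifact that a later workflow node receives as an ordinary variable, which is the pattern the commented-out "Fetch the template name from NFS exports" task elsewhere in this compare relies on. A minimal sketch of the producer/consumer pair, assembled for illustration rather than copied verbatim from either playbook:

```yaml
# Producer node: publish the template name as a workflow artifact
- name: Grab the template name
  ansible.builtin.set_stats:
    per_host: false
    data:
      nfs_exports_template_name: "{{ tower_job_template_name }}"

# Consumer node: the artifact arrives as a plain variable in the next job
- name: Fetch the template name from NFS exports
  ansible.builtin.set_fact:
    job_template_name: "{{ nfs_exports_template_name }}"
```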

View File

@ -1,5 +1,5 @@
---
- name: Execute ReaR restore
- name: Execute a ReaR restore and restore system to pre-Bigboot state
hosts: all
become: true
gather_facts: true
@ -9,8 +9,8 @@
- bigboot_vars.yml
tasks:
- name: Restore from ReaR backup
ansible.builtin.include_role:
- name: Shrink the logical volume to support /boot expansion
ansible.builtin.import_role:
name: rhc.rear.rear_restore
- name: Cleanup from any previous executions

View File

@ -12,12 +12,20 @@
- name: Cleanup from any previous executions
ansible.builtin.import_tasks: tasks/cleanup.yml
# - name: Check for services that require being disabled
# ansible.builtin.import_tasks: tasks/check_services.yml
- name: Set boot device details
ansible.builtin.import_tasks: tasks/capture_boot_device_details.yml
- name: Set logical volume information
ansible.builtin.import_tasks: tasks/capture_lv_device_details.yml
# - name: Run pre-checks to verify environment
# ansible.builtin.import_tasks: tasks/pre-checks.yml
# when:
# - bigboot_execute_bigboot | bool
- name: Set environment for subsequent workflow nodes
ansible.builtin.set_stats:
aggregate: true

View File

@ -0,0 +1,8 @@
download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/infra-lvm_snapshots-2.1.0.tar.gz
format_version: 1.0.0
name: lvm_snapshots
namespace: infra
server: https://galaxy.ansible.com/api/
signatures: []
version: 2.1.0
version_url: /api/v3/plugin/ansible/content/published/collections/index/infra/lvm_snapshots/versions/2.1.0/
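This metadata block records a collection artifact pulled from Ansible Galaxy at version 2.1.0, alongside the MANIFEST.json and FILES.json added later in this compare. As a hedged sketch (the install path is illustrative and not taken from this repository), the same artifact could be pinned with a requirements file:

```yaml
# requirements.yml -- pin the vendored collection to the version recorded above
collections:
  - name: infra.lvm_snapshots
    version: 2.1.0
# Then install into a project-local path, for example:
#   ansible-galaxy collection install -r requirements.yml -p ./collections
```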

View File

@ -1,5 +0,0 @@
---
exclude_paths:
- .github
- changelogs
...

View File

@ -1 +0,0 @@
inventory.yml

View File

@ -1,78 +0,0 @@
[MESSAGES CONTROL]
disable=
# "F" Fatal errors that prevent further processing
# import-error,
# "I" Informational noise
# "E" Error for important programming issues (likely bugs)
# no-member,
# no-name-in-module,
# raising-bad-type,
# "W" Warnings for stylistic problems or minor programming issues
# no-absolute-import,
# arguments-differ,
# cell-var-from-loop,
# fixme,
; lost-exception,
; no-init,
; pointless-string-statement,
; protected-access,
; redefined-outer-name,
; relative-import,
; undefined-loop-variable,
; unsubscriptable-object,
# unused-argument,
; unused-import,
; unspecified-encoding,
# "C" Coding convention violations
; bad-continuation,
; missing-docstring,
; wrong-import-order,
; use-maxsplit-arg,
; consider-using-dict-items,
; consider-using-enumerate,
# "R" Refactor recommendations
; duplicate-code,
; no-self-use,
; too-few-public-methods,
; too-many-branches,
; too-many-locals,
; too-many-statements,
; consider-using-from-import,
; use-list-literal,
; use-dict-literal,
# new for python3 version of pylint
; useless-object-inheritance,
; consider-using-set-comprehension, # pylint3 force to use comprehension in place we don't want (py2 doesn't have these options, for inline skip)
; unnecessary-pass,
; invalid-envvar-default, # pylint3 warnings envvar returns str/none by default
; bad-option-value, # python 2 doesn't have import-outside-toplevel, but in some case we need to import outside toplevel
; super-with-arguments, # required in python 2
; raise-missing-from, # no 'raise from' in python 2
; use-a-generator, # cannot be modified because of Python2 support
; consider-using-with, # on bunch spaces we cannot change that...
; duplicate-string-formatting-argument, # TMP: will be fixed in close future
consider-using-f-string, # sorry, not gonna happen, still have to support py2
; use-dict-literal
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
[DESIGN]
max-args=11 # 2x + 1 from default
max-attributes=21 # 4x + 1 from default
[REPORTS]
msg-template='[{msg_id} {symbol}] {msg} File: {path}, line {line}, in {obj}'
[BASIC]
# In order to make a check more strict add proper regex http://pylint-messages.wikidot.com/messages:c0103
argument-rgx=.*
attr-rgx=.*
class-rgx=.*
const-rgx=.*
function-rgx=.*
method-rgx=.*
module-rgx=.*
variable-rgx=.*
inlinevar-rgx=.*

View File

@ -1,4 +0,0 @@
rules:
indentation:
spaces: 2
indent-sequences: false

View File

@ -4,36 +4,6 @@ LVM Snapshot Linux Role Release Notes
.. contents:: Topics
v2.1.1
======
Minor Changes
-------------
- Improve documentation and example usage of initramfs role
- Improved console logging of bigboot progress to include percent complete
- Support check mode when using the bigboot role
Bugfixes
--------
- Fix bigboot repeatedly increasing the partition size
v2.1.0
======
Major Changes
-------------
- add bigboot support for Btrfs next partition
Minor Changes
-------------
- do bigboot LVM changes with Ansible instead of pre-mount hook
- new bigboot_partition_size variable to make bigboot role more idempotent
- show console log output from bigboot even if quiet kernel arg is set
v2.0.3
======

View File

@ -0,0 +1,670 @@
{
"files": [
{
"name": ".",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_remove",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_remove/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_remove/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "509ab8a1e07e4d71837884e88d8190ef13f58745ee10a8ade71e73e788308087",
"format": 1
},
{
"name": "roles/snapshot_remove/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5e2d063f1f2eb572281101338b21d84d0ec3cf171a7ade4cf456c54179f4f2e6",
"format": 1
},
{
"name": "roles/snapshot_remove/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_remove/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "606e6e273462d94697558655fa3bfb1491e3474b112bd1e3a549115d50b44632",
"format": 1
},
{
"name": "roles/initramfs",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/initramfs/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/initramfs/tasks/preflight.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b1418bba68bd83500aee8fb067562e4cae27dd1c6585d74938f1d3b15a5c575a",
"format": 1
},
{
"name": "roles/initramfs/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6e33876d82d59a25393985cd9948ee16b6ab29a7585c8ba2b30197ab16f7a767",
"format": 1
},
{
"name": "roles/initramfs/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "c628601c9a2f5c904131e0dc9c0e79f3c6f189b30bf64de651d95986f9bb9f02",
"format": 1
},
{
"name": "roles/initramfs/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/initramfs/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "825f9c7e7b25072c135162acead4ed57c8ab270b3dabc7b7eb626b6d35bdb050",
"format": 1
},
{
"name": "roles/initramfs/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/initramfs/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "f201653d875b43e7915feb027605fc95b1955debe5535f131ac07821fb6bc878",
"format": 1
},
{
"name": "roles/snapshot_revert",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_revert/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_revert/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5c80af9b54761322d9c7fb7a5366f32784d455f417aa05545f64e8efd3350b08",
"format": 1
},
{
"name": "roles/snapshot_revert/tasks/verify_snapshot_active.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "339d8c553502c3807457f2c3f9f7e8bfb136d260e73cc9e87e1990befacb69a5",
"format": 1
},
{
"name": "roles/snapshot_revert/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5d4174b8205d0a5c8c900a57260fd6ae66b84cdffd698eb44048bbfa58b2cff0",
"format": 1
},
{
"name": "roles/snapshot_revert/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_revert/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "642e5210da82a46d6bf143c0dfcb40af3866057defe55e9cf268a2ff6a1e892e",
"format": 1
},
{
"name": "roles/snapshot_create",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_create/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_create/tasks/create.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4be981713a7b7c9b784fb60170c3aea095073cd35d591cca006a782f0f7b7356",
"format": 1
},
{
"name": "roles/snapshot_create/tasks/check.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "d9fb57d25714b52d131ac49fc3471dd0e78a29fe39b4e853a880a8007ef92ca8",
"format": 1
},
{
"name": "roles/snapshot_create/tasks/verify_volume_exists.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "87fb805b1619aa98d312deb540775a14c3282b6f4515ecdeaa1690fa51e73339",
"format": 1
},
{
"name": "roles/snapshot_create/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "47495cf552f5c4038303911f8f7786a41c6397cea379ab0d70cb76fe015ebe10",
"format": 1
},
{
"name": "roles/snapshot_create/tasks/verify_no_existing_snapshot.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "724a282c30555bd9e0950675885999537b9200de1e7823509f583f09195a938d",
"format": 1
},
{
"name": "roles/snapshot_create/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "7c6e97382311dfee2ae894ad656fbe292a17d54c9029e5ce07c1587e09fe9c14",
"format": 1
},
{
"name": "roles/snapshot_create/files",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_create/files/check.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "911f7680558f08a7bd2d415a250e7e2b7d60aef2a633f0ce4d6bfa87b08db076",
"format": 1
},
{
"name": "roles/snapshot_create/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_create/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "d0c9faaf48633e7d073817625fbf0d95f975c69dd2201b99c52f557e15c6431e",
"format": 1
},
{
"name": "roles/snapshot_create/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/snapshot_create/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "25d6004aad8f7ffa6417bb5be7e28a7c06f22c641ce4b66ef31afc67bc972312",
"format": 1
},
{
"name": "roles/shrink_lv",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/shrink_lv/templates",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/shrink_lv/templates/shrink-start.sh.j2",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "aa1864ed4b3767f2c75b984d80a8a80667e0bbdba43ef049f03e9abbd4f6b030",
"format": 1
},
{
"name": "roles/shrink_lv/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/shrink_lv/tasks/preflight.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "97eb164e678d888f95513079d7d22001b0af23a17899b94bd82cafba3808eea1",
"format": 1
},
{
"name": "roles/shrink_lv/tasks/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2717ed24e19652be68fe5120f22f77e7b5fea969b8cb97257d77302710f0417d",
"format": 1
},
{
"name": "roles/shrink_lv/tasks/check_device.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "201415597a64407c404134452f90b7ffe62370e10c245040cffb5c3be7d4ac2c",
"format": 1
},
{
"name": "roles/shrink_lv/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4da5f48565753fcfac29f1856454085f503c207578eb6c58d750e2c76baedcbb",
"format": 1
},
{
"name": "roles/shrink_lv/files",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/shrink_lv/files/shrink.sh",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "1bdf2d37411ea35c3e91c6427677775fee37c783bfb948d55cbfc6e11a0c89aa",
"format": 1
},
{
"name": "roles/shrink_lv/files/module-setup.sh",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b0b9d303e9b2ce57ce4b6e8ee801471b7ae2763d65912677ce2c6f6d1d2c3f09",
"format": 1
},
{
"name": "roles/shrink_lv/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/shrink_lv/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "31e41f67bd856b5055e161e55c013397a145d3170c612bd7999fd2e9a3706526",
"format": 1
},
{
"name": "roles/shrink_lv/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/shrink_lv/defaults/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "ad38f2c50795ab06e4b13ffd6b78083c72508efc60e90681a85e2eb29f474a19",
"format": 1
},
{
"name": "roles/bigboot",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/bigboot/templates",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/bigboot/templates/increase-boot-partition.sh.j2",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "3a2850a945c6e14103b119713f8b0e0d7d5c58dcf9853cb80587cdbb49e75a72",
"format": 1
},
{
"name": "roles/bigboot/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/bigboot/tasks/prep_lvm.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e7ef8b4e143d1f04a71a91438606d8d5ef61186169dffa0661cdac5ac65c7c5d",
"format": 1
},
{
"name": "roles/bigboot/tasks/prep_btrfs.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e51169ec2589645540a59d49e91d35790d1f04bd60b9d2e0498d26a115bcbbf4",
"format": 1
},
{
"name": "roles/bigboot/tasks/get_boot_device_info.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6fefba635d26df89f1cf4308f93f82ead07e457e13d8a45c3eaa532c1338f9e7",
"format": 1
},
{
"name": "roles/bigboot/tasks/do_bigboot_reboot.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "342d2c4bbd4d91590e62984ee7d4ed78cb5ca97e6970a7a4d071839e89cf46c5",
"format": 1
},
{
"name": "roles/bigboot/tasks/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "dbd58033393e580312d40e2fed7a3dda261f26e8393d28830205c89b4b24c3a5",
"format": 1
},
{
"name": "roles/bigboot/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a8a1734d9f3803f6ccae7eb57531d04caf2da1be0adcfa41ed6c4d9b25dd85e1",
"format": 1
},
{
"name": "roles/bigboot/files",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/bigboot/files/module-setup.sh",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0a3130bbbbda27623f91f00735285641f7480b75201df8f8252bc0b0e1215bfb",
"format": 1
},
{
"name": "roles/bigboot/files/bigboot.sh",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "17e3dde12e2b102eb2eca1a11f2a62e0e4b6d8b982c75da0a2dd194bf8be3122",
"format": 1
},
{
"name": "roles/bigboot/files/sfdisk.static",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "aebb9ea3b54f739dafef4b2e581ffbc6c72847487ba0dd6b1c7fceb26277985d",
"format": 1
},
{
"name": "roles/bigboot/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/bigboot/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "fafb7ac1801552bafb0a71a6d5ddea32ba4699acaa179a32689f316b47d7db07",
"format": 1
},
{
"name": "roles/bigboot/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/bigboot/defaults/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2546f99a2ca7a0a9a7fc93efdb989581d5ae6b8c5dae798877fb6bbf48f597de",
"format": 1
},
{
"name": "CONTRIBUTING.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b380120df01b9bdb8d8a9db37688f9ca2d56de8d65fa26503156a6819371d487",
"format": 1
},
{
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "ff0b7f4f22711076d82f03cf5f6fc77f947a4d4756e1292c9d1b9af116a2f935",
"format": 1
},
{
"name": "LICENSE",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "aa611f74dc5804197879f24d3f192f438845fae8e1dda9053e8637fccda0d05a",
"format": 1
},
{
"name": "changelogs",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "17bc457386b91d2469190e384af050f297d4109710d57bf686400ab53bab4c99",
"format": 1
},
{
"name": "changelogs/config.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "7d9693a308a709ae4d30cde6a08ed9ebd9cd6f1222aa251c44e6122ea84a50d3",
"format": 1
},
{
"name": "changelogs/fragments",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "changelogs/fragments/.gitkeep",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
"name": "changelogs/fragments/more_idempotent.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2d436efd751cff9c1d021f6af4ef405cbf8182fdcb169e1873ff8e8735404fff",
"format": 1
},
{
"name": "changelogs/fragments/btrfs_bigboot.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "3871d625e89227e10bd82c3a9ba3a16f5ff8f1d86887782f3ff971e13ac8e7f1",
"format": 1
},
{
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "dcde0629116149ea3e53f2ea71ea4195c3dd560266cad9b28737cc1c424cd755",
"format": 1
},
{
"name": ".github",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github/workflows",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github/workflows/pylint.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "af052a539c6b2dcfa7c8388c599e9f0bc58b72f90ea1ad3f958a2e9f6c32a234",
"format": 1
},
{
"name": ".github/workflows/ansible-lint.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "52b5b419c651b390716392600738b63b75d5b4431c127dab064fd39762cdf9f1",
"format": 1
},
{
"name": ".github/workflows/shellcheck.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2b8a5ab122a75643a2941d2fea2b876c8ef55f6eff41b20896e4ede9e76788c8",
"format": 1
},
{
"name": ".github/workflows/codespell.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "de7825f066c930358517755935e05df6ac8a4566df9a4a7affc7ec8c12d64a77",
"format": 1
},
{
"name": ".github/workflows/release.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "12387e85a382cf64ac8eed9e9c1282ae15e1d38c087efbc64f6ee16b7953367d",
"format": 1
},
{
"name": ".github/workflows/pycodestyle.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "016e1649c987f5265f14bbffd125e9cffdabb8a0adac1eb49fac556b3ca157d3",
"format": 1
},
{
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6a37c02d6aa4fce47aa56689a5e9c0c1f681f65479ae589732017edab3e318aa",
"format": 1
},
{
"name": "CODEOWNERS",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a2277cb0ecb46693ddba53cff778757052e73481e7ac2cf623f1a3e327913117",
"format": 1
},
{
"name": ".pycodestyle",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "9bc6971355aed1f873d5daa68c393c84fa3c8ef26f71c975f4fd8afa3b690156",
"format": 1
},
{
"name": "meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "16fee19238e61fab14a7974fd79800cd659367bbc8b052d9f0b190108f6a2c53",
"format": 1
}
],
"format": 1
}

View File

@ -0,0 +1,35 @@
{
"collection_info": {
"namespace": "infra",
"name": "lvm_snapshots",
"version": "2.1.0",
"authors": [
"Ygal Blum <yblum@redhat.com>",
"Bob Mader <bob@redhat.com>"
],
"readme": "README.md",
"tags": [
"ansible",
"lvm",
"storage"
],
"description": "Ansible role for creating and rolling back LVM snapshots",
"license": [],
"license_file": "LICENSE",
"dependencies": {
"community.general": "*"
},
"repository": "https://github.com/redhat-cop/infra.lvm_snapshots",
"documentation": "https://github.com/redhat-cop/infra.lvm_snapshots",
"homepage": "https://github.com/redhat-cop/infra.lvm_snapshots",
"issues": "https://github.com/redhat-cop/infra.lvm_snapshots/issues"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "d5511e77f16eea344fcd5c4f879eae5752db76b5cdfc848ea541b2d8d02727bc",
"format": 1
},
"format": 1
}

View File

@ -1,13 +0,0 @@
# Bigboot Stickers
I presented a lightning talk at Red Hat Summit 2024 giving an overview and demo of the [`bigboot`](./roles/bigboot/) role. To help drive engagement and add some excitement, I had 100 of these stickers printed up. The talk was well received and all the stickers were given away.
![People grabbing stickers after my talk](images/bobtalk.jpg)
Since then, lots of folks asked me if there are more stickers. Well, I recently discovered how to make them available for folks to order at Sticker Mule. You can get 10 stickers for 9.99 USD. These are nice die cut stickers measuring 2.29" x 3" (58mm x 76mm). They are available with [plain white vinyl](https://www.stickermule.com/swapdisk/item/14744765) the same as the ones I gave away or with this [fancy pants holographic background](https://www.stickermule.com/swapdisk/item/17024469) that looks really cool.
![Bigboot stickers in white and holographic](images/bigboot-stickers.png)
If you have the means to print your own stickers or just want the source artwork, download the 1200 dpi image file [here](images/bigboot-sticker-transbg-1200dpi.png).
Add some bling to your laptop lid today and share with your friends. Cheers!

View File

@ -16,4 +16,4 @@ plugins:
strategy: {}
test: {}
vars: {}
version: 2.1.1
version: 2.0.3

View File

@ -122,29 +122,3 @@ releases:
- fix_image_copy.yml
- fix_lvm_config.yml
release_date: '2024-04-25'
2.1.0:
changes:
major_changes:
- add bigboot support for Btrfs next partition
minor_changes:
- do bigboot LVM changes with Ansible instead of pre-mount hook
- new bigboot_partition_size variable to make bigboot role more idempotent
- show console log output from bigboot even if quiet kernel arg is set
fragments:
- btrfs_bigboot.yml
- more_idempotent.yml
release_date: '2024-07-15'
2.1.1:
changes:
bugfixes:
- Fix bigboot repeatedly increasing the partition size
minor_changes:
- Improve documentation and example usage of initramfs role
- Improved console logging of bigboot progress to include percent complete
- Support check mode when using the bigboot role
fragments:
- 78-improve-bigboot-check-mode.yml
- bigboot_progress_meter.yml
- fix_80.yml
- initramfs_docs.yml
release_date: '2024-12-10'

View File

@ -1,2 +0,0 @@
minor_changes:
- Prevent snapshot creation when newest installed kernel is not in use

View File

@ -1,3 +0,0 @@
minor_changes:
- Updates to support hosts with bind/overlay mounts attached
to the device intended to be operated on.

View File

@ -0,0 +1,5 @@
major_changes:
- add bigboot support for Btrfs next partition
minor_changes:
- show console log output from bigboot even if quiet kernel arg is set
- do bigboot LVM changes with Ansible instead of pre-mount hook

View File

@ -0,0 +1,2 @@
minor_changes:
- new bigboot_partition_size variable to make bigboot role more idempotent

View File

@ -1,31 +0,0 @@
---
namespace: infra
name: lvm_snapshots
version: 2.1.1
readme: README.md
authors:
- Ygal Blum <yblum@redhat.com>
- Bob Mader <bob@redhat.com>
description: Ansible role for creating and rolling back LVM snapshots
license_file: LICENSE
tags:
- ansible
- lvm
- storage
dependencies:
"community.general": "*"
repository: https://github.com/redhat-cop/infra.lvm_snapshots
documentation: https://github.com/redhat-cop/infra.lvm_snapshots
homepage: https://github.com/redhat-cop/infra.lvm_snapshots
issues: https://github.com/redhat-cop/infra.lvm_snapshots/issues
build_ignore:
- .ansible-lint
- .pylintrc
- .yamllint
- .git
- .gitignore
- ansible.cfg
- requirements.yml
- tests
- vendor
...

Binary image files not shown (three images, previously 347 KiB, 700 KiB, and 951 KiB).

View File

@ -1,4 +0,0 @@
---
collections:
- name: community.general
...

View File

@ -10,12 +10,10 @@ The role configures a dracut pre-mount hook that executes during a reboot to inc
> **WARNING!**
>
> All blocks of the partition above the boot partition are copied using `sfdisk` during the reboot and this can take several minutes or more depending on the size of that partition. The bigboot script periodically outputs progress messages to the system console to make it clear that the system is not in a "hung" state, but these progress messages may not be seen if `rhgb` or `quiet` kernel arguments are set. If the system is reset while the blocks are being copied, the partition will be irrecoverably corrupted. Do not assume the system is hung or force a reset during the bigboot reboot!
> All blocks of the partition above the boot partition are copied using `sfdisk` during the reboot and this can take several minutes or more depending on the size of that partition. The bigboot script periodically outputs progress messages to the system console to make it clear that the system is not in a "hung" state, but these progress messages may not be seen if `rhgb` or `quiet` kernel arguments are set. If the system is reset while the blocks are being copied, the partition will be irrcoverably corrupted. Do not assume the system is hung or force a reset during the bigboot reboot!
To learn more about how bigboot works, check out this [video](https://people.redhat.com/bmader/bigboot-demo.mp4).
Get bigboot stickers for your laptop [here](../../STICKERS.md).
## Role Variables
### `bigboot_partition_size` (String)
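The README excerpt above describes how the `bigboot` role grows `/boot` via a dracut pre-mount hook during a reboot. As a hedged sketch of how the role is wired up elsewhere in this compare (the play name and the default are illustrative; `bigboot_partition_size` and `bigboot_size_target` both appear in the files above):

```yaml
- name: Increase /boot ahead of an in-place upgrade (illustrative)
  hosts: all
  become: true
  tasks:
    - name: Run bigboot to grow the boot partition
      ansible.builtin.include_role:
        name: infra.lvm_snapshots.bigboot
      vars:
        bigboot_partition_size: "{{ bigboot_size_target | default('1G') }}"
```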

View File

@ -71,18 +71,10 @@ if ! ret=$(echo Yes | /usr/sbin/parted "$boot_disk_device" ---pretend-input-tty
fi
# Output progress messages to help impatient operators recognize the server is not "hung"
( sleep 9
while pid="$(ps -C sfdisk -o pid:1=)"; do
pct='??'
for fd in /proc/"$pid"/fd/*; do
if [[ "$(readlink "$fd")" == "$boot_disk_device" ]]; then
offset="$(awk '/pos:/ {print $2}' /proc/"$pid"/fdinfo/"${fd##*/}")"
pct="$((-100*offset/next_part_size+100))"
break
fi
done
echo "$name: Partition move is progressing, please wait! ($pct% complete)"
sleep 20
( sleep 4
while t="$(ps -C sfdisk -o cputime=)"; do
echo "$name: Partition move is progressing, please wait! ($t)"
sleep 120
done ) &
# Shift next partition

View File

@ -14,20 +14,17 @@
dest: /usr/lib/dracut/modules.d/99extend_boot/increase-boot-partition.sh
mode: '0554'
- name: Configure hook removal reboot cron
ansible.builtin.cron:
name: bigboot hook removal
cron_file: bigboot_hook_removal
user: root
special_time: reboot
job: '(rm -rf /usr/lib/dracut/modules.d/99extend_boot; rm -f /etc/cron.d/bigboot_hook_removal) > /dev/null 2>&1'
- name: Create the initramfs and reboot to run the module
vars:
initramfs_add_modules: "extend_boot"
ansible.builtin.include_role:
name: initramfs
- name: Remove dracut extend boot module
ansible.builtin.file:
path: /usr/lib/dracut/modules.d/99extend_boot
state: absent
- name: Retrieve mount points
ansible.builtin.setup:
gather_subset:
@ -49,4 +46,3 @@
Boot filesystem size is now
{{ bigboot_boot_fs_new_size | int | human_readable }}
({{ (bigboot_boot_fs_new_size | int - bigboot_boot_fs_original_size | int) | human_readable }} increase)
when: not ansible_check_mode

View File

@ -15,7 +15,6 @@
-o vg_extent_size {{ bigboot_next_partition_vg }}
changed_when: false
register: vg_extent_size
check_mode: false
- name: Align bigboot increase to extent size
ansible.builtin.set_fact:

View File

@ -6,7 +6,8 @@ The role is designed to be internal for this collection and support the automati
## Contents
To allow fast fail, the role provides a [`preflight.yml`](./tasks/preflight.yml) tasks file that should be included early in the play that ultimately includes the [`main`](./tasks/main.yml) role that actually reboots the host. Refer the usage section below for example.
To allow fast fail, the role provides a [`preflight.yml`](./tasks/preflight.yml) tasks file to be used at the start of the playbook.
Please note that the [`main`](./tasks/main.yml) task file will not run the preflight checks
## Role Variables
@ -14,8 +15,8 @@ All variables are optional
### `initramfs_add_modules`
`initramfs_add_modules` is a space-separated list of dracut modules to be added to the default set of modules.
See [`dracut --add`](https://man7.org/linux/man-pages/man8/dracut.8.html) option for details.
`initramfs_add_modules` is a a space-separated list of dracut modules to be added to the default set of modules.
See [`dracut`](https://man7.org/linux/man-pages/man8/dracut.8.html) `-a` parameter for details.
### `initramfs_backup_extension`
@ -38,54 +39,21 @@ The value is used for [`reboot_timeout`](https://docs.ansible.com/ansible/latest
Defaults to `7200`
## Example role usage
We will refer to the `bigboot` role of this collection to explain how the `initramfs` role can be used. Let's look at the `tasks/main.yaml` of the `bigboot` role. After the required facts have been gathered, the [Validate initramfs preflight](https://github.com/redhat-cop/infra.lvm_snapshots/blob/2.1.0/roles/bigboot/tasks/main.yaml#L10-L13) task includes the `initramfs` role preflight tasks:
## Example of a playbook to run the role
The following yaml is an example of a playbook that runs the role against a group of hosts named `rhel` and increasing the size of its boot partition by 1G.
The boot partition is automatically retrieved by the role by identifying the existing mounted partition to `/boot` and passing the information to the script using the `kernel_opts`.
```yaml
- name: Validate initramfs preflight
ansible.builtin.include_role:
name: initramfs
tasks_from: preflight
- name: Extend boot partition playbook
hosts: all
tasks:
- name: Validate initramfs preflight
ansible.builtin.include_role:
name: initramfs
tasks_from: preflight
- name: Create the initramfs and reboot to run the module
vars:
initramfs_add_modules: "my_extra_module"
ansible.builtin.include_role:
name: initramfs
```
If this is successful, the `bigboot` role continues to perform additional tasks and checks specific to its function. With that done, it moves on to `tasks/do_bigboot_reboot.yml` which [configures a dracut pre-mount hook](https://github.com/redhat-cop/infra.lvm_snapshots/blob/2.1.0/roles/bigboot/tasks/do_bigboot_reboot.yml#L1-L15) to prepare for the customized initramfs reboot:
```yaml
- name: Copy dracut pre-mount hook files
ansible.builtin.copy:
src: "{{ item }}"
dest: /usr/lib/dracut/modules.d/99extend_boot/
mode: "0554"
loop:
- bigboot.sh
- module-setup.sh
- sfdisk.static
- name: Resolve and copy pre-mount hook wrapper script
ansible.builtin.template:
src: increase-boot-partition.sh.j2
dest: /usr/lib/dracut/modules.d/99extend_boot/increase-boot-partition.sh
mode: '0554'
```
After that, it [includes](https://github.com/redhat-cop/infra.lvm_snapshots/blob/2.1.0/roles/bigboot/tasks/do_bigboot_reboot.yml#L17-L21) the main `initramfs` role which will create a custom initramfs built with the dracut hook configured above, reboot the host to run the hook, and lastly, restore the original initramfs after the reboot:
```yaml
- name: Create the initramfs and reboot to run the module
vars:
initramfs_add_modules: "extend_boot"
ansible.builtin.include_role:
name: initramfs
```
Also, note that while the `initramfs` role handles restoring the original initramfs, it is up to the including play to clean up the dracut hook files it configured. We see this with the [Remove dracut extend boot module](https://github.com/redhat-cop/infra.lvm_snapshots/blob/2.1.0/roles/bigboot/tasks/do_bigboot_reboot.yml#L23-L26) task that immediately follows the task including the `initramfs` role:
```yaml
- name: Remove dracut extend boot module
ansible.builtin.file:
path: /usr/lib/dracut/modules.d/99extend_boot
state: absent
```
The `shrink_lv` role of this collection is another [example](https://github.com/redhat-cop/infra.lvm_snapshots/blob/2.1.0/roles/shrink_lv/tasks/main.yaml#L13-L37) of using the `initramfs` role that you may study.

View File

@ -14,7 +14,7 @@
ansible.builtin.copy:
remote_src: true
src: /boot/initramfs-{{ initramfs_kernel_version }}.img
dest: /root/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }}
dest: /boot/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }}
mode: "0600"
- name: Create a new initramfs with the optional additional modules
@ -22,15 +22,11 @@
ansible.builtin.command: '/usr/bin/dracut {{ ((initramfs_add_modules | length) > 0) | ternary("-a", "") }} "{{ initramfs_add_modules }}" --kver {{ initramfs_kernel_version }} --force'
changed_when: true
- name: Configure initramfs restore reboot cron
ansible.builtin.cron:
name: initramfs restore
cron_file: initramfs_restore
user: root
special_time: reboot
# yamllint disable-line rule:line-length
job: '(mv -f /root/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }} /boot/initramfs-{{ initramfs_kernel_version }}.img; rm -f /etc/cron.d/initramfs_restore) > /dev/null 2>&1'
- name: Reboot the server
- name: Reboot host
ansible.builtin.import_role:
name: verified_reboot
- name: Restore previous initramfs
# yamllint disable-line rule:line-length
ansible.builtin.command: '/usr/bin/mv -f /boot/initramfs-{{ initramfs_kernel_version }}.img.{{ initramfs_backup_extension }} /boot/initramfs-{{ initramfs_kernel_version }}.img'
changed_when: true

View File

@ -15,7 +15,6 @@
cmd: /sbin/grubby --default-kernel
register: initramfs_grubby_rc
changed_when: false
check_mode: false
- name: Parse default kernel version
ansible.builtin.set_fact:

View File

@ -0,0 +1 @@
shrink_lv_backup_extension: old

View File

@ -5,6 +5,7 @@
- name: Assert that the filesystem has shrunk
ansible.builtin.assert:
# yamllint disable-line rule:line-length
that: (shrink_lv_set_device['size_total'] | int) <= (item['size'] | ansible.builtin.human_to_bytes)
fail_msg: >
Logical Volume {{ item['device'] }} was NOT shrunk as requested.

View File

@ -2,10 +2,10 @@
- name: Make sure the required facts are available
ansible.builtin.setup:
gather_subset:
- "!all"
- "!min"
- kernel
- mounts
- "!all"
- "!min"
- kernel
- mounts
- name: Run preflight checks
ansible.builtin.include_tasks: preflight.yaml
@ -16,8 +16,8 @@
dest: /usr/lib/dracut/modules.d/99shrink_lv/
mode: "0554"
loop:
- module-setup.sh
- shrink.sh
- module-setup.sh
- shrink.sh
- name: Resolve and copy the shrink-start script
ansible.builtin.template:
@ -39,9 +39,9 @@
- name: Retrieve mount points
ansible.builtin.setup:
gather_subset:
- "!all"
- "!min"
- mounts
- "!all"
- "!min"
- mounts
- name: Check if device has shrunken successfully
ansible.builtin.include_tasks: check_if_shrunk.yml

View File

@ -1,9 +1,3 @@
- name: Validate default kernel is booted
ansible.builtin.include_role:
name: initramfs
tasks_from: preflight
when: snapshot_create_boot_backup
- name: Verify that all volumes exist
ansible.builtin.include_tasks: verify_volume_exists.yml
loop: "{{ snapshot_create_volumes }}"

View File

@ -1,39 +0,0 @@
# Testing the LVM Snapshot Role
## Prerequisites
- All the tests are in the form of ansible playbooks.
- All playbooks expect that the target machine will have a secondary storage device to be used for testing.
## Variables
The variables may be passed as part of the inventory or using a separate file.
```yaml
device: < device node without `/dev`. e.g. vdb >
```
## Ansible Configuration
In order to run the tests from the repo without having to install them,
the tests directory includes an [ansible.cfg](./ansible.cfg) file.
Make sure to point to it when running the test playbook
## Running a test
### Inventory file
In this example, the `device` parameter is passed in the `inventory.yml` file
```yaml
all:
hosts:
<FQDN of test machine>:
device: vdb
```
### Command line
Running the [snapshot revert playbook](./test-revert-playbook.yml) test from the repo
```bash
ANSIBLE_CONFIG=./tests/ansible.cfg ansible-playbook -K -i inventory.yml tests/test-revert-playbook.yml
```

View File

@ -1,2 +0,0 @@
[defaults]
roles_path=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:../roles

View File

@ -1,15 +0,0 @@
- name: Generate snapshot list out of volumes list
ansible.builtin.set_fact:
_snapshots: "{{ (_snapshots | default([])) + [{'vg': volume_group, 'lv': item.name, 'size': item.size}] }}"
loop: "{{ volumes }}"
- name: Create the snapshot
vars:
snapshot_create_volumes: "{{ _snapshots }}"
snapshot_create_set_name: "{{ snapshot_set_name }}"
ansible.builtin.include_role:
name: snapshot_create
- name: Verify that the snapshot was created
ansible.builtin.include_tasks: verify-snapshot-created.yml
loop: "{{ volumes }}"

View File

@ -1,40 +0,0 @@
- name: Fill the volume
block:
- name: Set the retry count
ansible.builtin.set_fact:
_retry_count: "{{ (_retry_count | default('-1') | int) + 1 }}"
- name: Generate the Sub-Directory name
ansible.builtin.set_fact:
_sub_dir_name: "{{ lookup('community.general.random_string', upper=false, numbers=false, special=false) }}"
- name: Make a copy of the boot partition
ansible.builtin.copy:
src: /boot
dest: "{{ test_directory }}/{{ _sub_dir_name }}"
remote_src: true
mode: '0777'
- name: Get the status of the snapshot
ansible.builtin.command: "lvs --select 'lv_name = {{ volume_name }}_{{ snapshot_set_name }}' --reportformat json"
register: _lv_status_check
changed_when: false
- name: Store the snapshot data_percent
ansible.builtin.set_fact:
_snapshot_data_percent: "{{ ((_lv_status_check.stdout | from_json).report[0].lv[0].data_percent) }}"
- name: Check if snapshot is full enough
ansible.builtin.assert:
that: _snapshot_data_percent|float > snapshot_fill_percent|float
quiet: true
rescue:
- name: Check the retry count to avoid endless loop
ansible.builtin.assert:
that: (_retry_count|int) < (snapshot_max_retry|int)
fail_msg: "Ended after {{ snapshot_max_retry }} retries"
success_msg: "Volume is not full enough ({{ _snapshot_data_percent }}) - Run again..."
- name: Include the same tasks file again
ansible.builtin.include_tasks: fill-snapshot.yml

View File

@ -1,11 +0,0 @@
- name: Unmount the "{{ item.directory }}"
ansible.posix.mount:
path: "{{ item.directory }}"
state: absent
- name: Remove the logical volume
community.general.lvol:
vg: "{{ volume_group }}"
lv: "{{ item.name }}"
force: true
state: absent

View File

@ -1,25 +0,0 @@
- name: Cleanup the volumes
ansible.builtin.include_tasks: post-test-clean-volume.yml
loop: "{{ volumes }}"
- name: Remove the volume group
community.general.lvg:
vg: "{{ volume_group }}"
pvs: "/dev/{{ device }}1"
state: absent
- name: Remove the PV
ansible.builtin.command: "pvremove /dev/{{ device }}1"
changed_when: true
- name: Cleanup the system.devices file
ansible.builtin.lineinfile:
path: /etc/lvm/devices/system.devices
regexp: "IDTYPE=devname IDNAME=/dev/{{ device }}1 DEVNAME=/dev/{{ device }}1 PVID=. PART=1"
state: absent
- name: Delete the partition
community.general.parted:
device: "/dev/{{ device }}"
number: 1
state: absent

View File

@ -1,18 +0,0 @@
- name: Create the logical volume
community.general.lvol:
vg: "{{ volume_group }}"
lv: "{{ item.name }}"
size: "{{ item.size }}"
force: true
- name: Format the ext4 filesystem
community.general.filesystem:
fstype: ext4
dev: "/dev/{{ volume_group }}/{{ item.name }}"
- name: Mount the lv on "{{ item.directory }}"
ansible.posix.mount:
path: "{{ item.directory }}"
src: "/dev/{{ volume_group }}/{{ item.name }}"
fstype: ext4
state: mounted

View File

@ -1,23 +0,0 @@
- name: Create partition
community.general.parted:
device: "/dev/{{ device }}"
number: 1
part_end: 9GiB
flags:
- lvm
state: present
- name: Install lvm2 dependency
ansible.builtin.package:
name: lvm2
state: present
- name: Create the volume group
community.general.lvg:
vg: "{{ volume_group }}"
pvs: "/dev/{{ device }}1"
pesize: 16
- name: Create and prepare the volumes
ansible.builtin.include_tasks: pre-test-prepare-volume.yml
loop: "{{ volumes }}"

View File

@ -1,24 +0,0 @@
- name: Run lvs
ansible.builtin.command: lvs --select 'vg_name = {{ volume_group }} && origin = {{ item.name }}' --reportformat json
register: lvs_response
changed_when: false
- name: Parse report
ansible.builtin.set_fact:
lv_snapshot_array: "{{ (lvs_response.stdout | from_json).report[0].lv }}"
- name: Verify that the snapshot exists
ansible.builtin.assert:
that: (lv_snapshot_array | length) == 1
fail_msg: >
The snapshot for {{ item.name }} was not created
- name: Get the snapshot name
ansible.builtin.set_fact:
snapshot_name: "{{ lv_snapshot_array[0].lv_name | default('n/a') }}"
- name: Verify that the snapshot was named correctly
ansible.builtin.assert:
that: snapshot_name == item.name + '_' + snapshot_set_name
fail_msg: >
Snapshot name '{{ snapshot_name }}' is not as expected {{ item.name }}_{{ snapshot_set_name }}

View File

@ -1,16 +0,0 @@
- name: Run lvs
ansible.builtin.command: lvs --select 'vg_name = {{ volume_group }} && lv_name = {{ volume_name }}_{{ snapshot_set_name }}' --reportformat json
register: lvs_response
changed_when: false
- name: Parse report
ansible.builtin.set_fact:
lv_snapshot_report_array: "{{ (lvs_response.stdout | from_json).report[0].lv }}"
- name: Verify that the snapshot no longer exists
ansible.builtin.assert:
that: (lv_snapshot_report_array | length) == 0
fail_msg: >
The snapshot '{{ volume_name }}_{{ snapshot_set_name }}' for
volume '{{ volume_name }}' in volume group '{{ volume_group }}'
still exists

View File

@ -40,10 +40,11 @@
# - name: Check for Bigboot state log and restore services to pre-Bigboot state
# ansible.builtin.import_tasks: tasks/restore_services.yml
- name: Cleanup old Bigboot service state log if present
ansible.builtin.file:
path: "{{ bigboot_disabled_services_log }}"
state: absent
# - name: Cleanup previous Bigboot state log if present
# ansible.builtin.file:
# path: "{{ bigboot_disabled_services_log }}"
# state: absent
# when:
# - bigboot_disabled_services_log_stat['stat']['exists'] | bool
...

View File

@ -4,9 +4,6 @@
client_ips: "{{ item['value']['ip_addresses'] | list | flatten }}"
namehost: "{{ item['value']['server_hostname'] }}"
- name: Include NFS export role # noqa var-naming
- name: Include NFS export role
ansible.builtin.include_role:
name: rhc.rear.nfs_export
vars:
rear_nfs_export_share: "{{ bigboot_nfs_backup_share }}"
rear_nfs_export_fsid: "{{ bigboot_nfs_backup_share_fsid }}"
name: rhc.rear.say_hi

View File

@ -1,9 +0,0 @@
---
- name: Validate ReaR backup completion
ansible.builtin.command:
cmd: "grep 'Finished running mkbackup workflow' /var/log/rear/rear-{{ ansible_hostname }}.log"
register: validate_backup_log
- name: Debug validate_backup_log
ansible.builtin.debug:
var: validate_backup_log

View File

@ -3,8 +3,35 @@ ansible_ssh_retries: 10
bigboot_size_target: 1G
# Filename of disabled services log:
bigboot_disabled_services_log: /var/IPE/IPU/el7to8/bigboot_disabled_services.log
bigboot_post_reboot_delay: 70
bigboot_reboot_timeout: 1800
# Define ReaR backup workflow:
rear_backup_workflow: bigboot
bigboot_skip_rear_backup: false
# Max value in minutes for services timeout threshold:
bigboot_service_max_timeout: 5
# List of services incompatible with calculations
# to obtain required disk information:
#
# (These services will ALWAYS be disabled)
bigboot_incompatible_services:
- docker.service
- named-chroot.service
# List of services which will be excluded from being
# disabled during Bigboot execution:
#
# (Services listed in `bigboot_incompatible_services`
# will ALWAYS be disabled regardless if they are protected or not)
bigboot_protected_services:
- sshd.service
- user@0.service
- network
- rhnsd.service
- rhnsd
- boksm.service
- SplunkForwarder.service
# Filename of disabled services log:
bigboot_disabled_services_log: /var/ipe/ipu/el7to8/bigboot_disabled_services.log
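The comments above describe two service lists: `bigboot_incompatible_services` are always stopped before Bigboot runs, while `bigboot_protected_services` are exempt from any broader shutdown. The check_services tasks that consume these lists are commented out elsewhere in this compare, so the following is a purely illustrative sketch, not this repository's implementation; `candidate_services` is a hypothetical input:

```yaml
- name: Build the list of services to stop (hypothetical helper)
  ansible.builtin.set_fact:
    services_to_disable: >-
      {{ (candidate_services | default([]) | difference(bigboot_protected_services))
         | union(bigboot_incompatible_services) }}

- name: Stop services before Bigboot execution (illustrative)
  ansible.builtin.service:
    name: "{{ item }}"
    state: stopped
  loop: "{{ services_to_disable }}"
  failed_when: false  # tolerate units that are not installed on a given host
```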

View File

@ -1,43 +0,0 @@
---
ansible_ssh_retries: 10
bigboot_size_target: 1G
bigboot_post_reboot_delay: 70
bigboot_reboot_timeout: 1800
bigboot_skip_rear_backup: false
# Max value in minutes for services timeout threshold:
bigboot_service_max_timeout: 5
# List of services incompatible with calculations
# to obtain required disk information:
#
# (These services will ALWAYS be disabled)
bigboot_incompatible_services:
- docker.service
- named-chroot.service
# List of services which will be excluded from being
# disabled during Bigboot execution:
#
# (Services listed in `bigboot_incompatible_services`
# will ALWAYS be disabled regardless if they are protected or not)
bigboot_protected_services:
- sshd.service
- user@0.service
- network
- rhnsd.service
- rhnsd
- boksm.service
- SplunkForwarder.service
# Filename of disabled services log:
bigboot_disabled_services_log: /var/ipe/ipu/el7to8/bigboot_disabled_services.log
bigboot_backup_success_path: "/var/log/IPE/IPU"
bigboot_backup_success_file: "{{ bigboot_backup_success_path }}/bigboot_rear_success"
bigboot_nfs_backup_share: "/backups/bigboot"
bigboot_nfs_backup_share_fsid: 20