quick_test/nfs_lazy_reboot2 copy.yml

---
- name: Detect hung NFS mounts, lazily unmount them, and reboot
  hosts: temp
  become: true
  gather_facts: false
  # With NFS share(s) in a hung state we cannot rely on `gather_facts`,
  # because fact gathering also hangs when it tries to work out the
  # current mounts on the system.
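  # Flow: list the mounted NFS/NFS4 shares, probe each mount point with a
  # short timeout, lazily unmount any that hang, then hand off to the
  # verified_reboot role.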
  tasks:
    - name: Check for mounted NFS shares # noqa: command-instead-of-module
      ansible.builtin.command: mount -t nfs,nfs4
      register: nfs_mounts_result
      changed_when: false
      failed_when: nfs_mounts_result['rc'] not in [0, 32]
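    # Example `mount -t nfs,nfs4` output line (host and paths are illustrative):
    #   nfshost:/export/data on /mnt/data type nfs4 (rw,relatime,...)
    # Splitting each line on whitespace, field index 2 is the local mount point.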
    - name: Create a list of NFS mount points from command output
      ansible.builtin.set_fact:
        nfs_mount_list: "{{ nfs_mounts_result['stdout_lines'] | map('split') | map(attribute=2) | list }}"
    - name: Verify mount status and reboot
      block:
        - name: Verify mount status
          ansible.builtin.command: "ls {{ item }}"
          timeout: 5
          register: r_verify_mounts
          loop: "{{ nfs_mount_list }}"
          loop_control:
            label: "{{ item }}"
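      # `ls` on a hard-hung NFS mount blocks indefinitely; the 5-second task
      # timeout turns that hang into a task failure, which drops execution
      # into the rescue section below.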
      rescue:
        - name: Debug r_verify_mounts
          ansible.builtin.debug:
            msg: "{{ r_verify_mounts }}"
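        # Each loop result carries `failed` and `item`; keep only the mount
        # points whose probe failed (i.e. timed out).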
        - name: Group failed shares together
          ansible.builtin.set_fact:
            failed_nfs_shares:
              "{{ r_verify_mounts['results'] | selectattr('failed') | map(attribute='item') | list }}"
        - name: Debug failed_nfs_shares
          ansible.builtin.debug:
            msg: "{{ failed_nfs_shares }}"
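        # `umount -l` (lazy) detaches the mount point immediately and cleans
        # up remaining references later; combined with `-f` (force) this is a
        # common way to clear a hard-hung NFS mount without blocking.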
        - name: Lazily unmount failed shares
          ansible.builtin.command: "umount -f -l {{ item }}"
          loop: "{{ failed_nfs_shares }}"
          loop_control:
            label: "{{ item }}"
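      # `always:` runs whether or not the rescue section fired; the actual
      # reboot logic lives in the external verified_reboot role (not shown here).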
      always:
        - name: Reboot host if file changes # noqa: no-handler
          ansible.builtin.import_role:
            name: verified_reboot