Initial project commit

2024-12-16 19:59:57 -05:00
commit fa1b5cce00
3409 changed files with 460909 additions and 0 deletions


@ -0,0 +1,529 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""short_description: Check or wait for migrations between nodes"""
# Copyright (c) 2018, Albert Autin
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: aerospike_migrations
short_description: Check or wait for migrations between nodes
description:
- This can be used to check for migrations in a cluster.
This makes it easy to do a rolling upgrade/update on Aerospike nodes.
    - If waiting for migrations is not desired, simply poll until
      port 3000 is available or C(asinfo -v status) returns ok
author: "Albert Autin (@Alb0t)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
host:
description:
            - Which host to use as the seed for the info connection.
required: false
type: str
default: localhost
port:
description:
- Which port to connect to Aerospike on (service port)
required: false
type: int
default: 3000
connect_timeout:
description:
- How long to try to connect before giving up (milliseconds)
required: false
type: int
default: 1000
consecutive_good_checks:
description:
            - How many times should the cluster report "no migrations"
              consecutively before returning OK to Ansible?
required: false
type: int
default: 3
sleep_between_checks:
description:
- How long to sleep between each check (seconds).
required: false
type: int
default: 60
tries_limit:
description:
- How many times do we poll before giving up and failing?
default: 300
required: false
type: int
local_only:
description:
- Do you wish to only check for migrations on the local node
before returning, or do you want all nodes in the cluster
to finish before returning?
required: true
type: bool
min_cluster_size:
description:
            - The check will fail until the cluster size is met
              or until tries are exhausted.
required: false
type: int
default: 1
fail_on_cluster_change:
description:
            - Fail if the cluster key changes;
              if something else is changing the cluster, we may want to fail.
required: false
type: bool
default: true
migrate_tx_key:
description:
- The metric key used to determine if we have tx migrations
remaining. Changeable due to backwards compatibility.
required: false
type: str
default: migrate_tx_partitions_remaining
migrate_rx_key:
description:
- The metric key used to determine if we have rx migrations
remaining. Changeable due to backwards compatibility.
required: false
type: str
default: migrate_rx_partitions_remaining
target_cluster_size:
description:
- When all aerospike builds in the cluster are greater than
version 4.3, then the C(cluster-stable) info command will be used.
Inside this command, you can optionally specify what the target
cluster size is - but it is not necessary. You can still rely on
min_cluster_size if you don't want to use this option.
- If this option is specified on a cluster that has at least 1
host <4.3 then it will be ignored until the min version reaches
4.3.
required: false
type: int
'''
EXAMPLES = '''
# check for migrations on local node
- name: Wait for migrations on local node before proceeding
community.general.aerospike_migrations:
host: "localhost"
connect_timeout: 2000
consecutive_good_checks: 5
sleep_between_checks: 15
tries_limit: 600
local_only: false
# example playbook:
- name: Upgrade aerospike
hosts: all
become: true
serial: 1
tasks:
- name: Install dependencies
ansible.builtin.apt:
name:
- python
- python-pip
- python-setuptools
state: latest
- name: Setup aerospike
ansible.builtin.pip:
name: aerospike
# check for migrations every (sleep_between_checks)
# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
# Will exit if any exception, which can be caused by bad nodes,
# nodes not returning data, or other reasons.
# Maximum runtime before giving up in this case will be:
# Tries Limit * Sleep Between Checks * delay * retries
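# As an illustrative worked example of that formula with the values below:
# 5 (tries_limit) * 1 (sleep_between_checks) * 60 (delay) * 120 (retries)
# = 36000 seconds, i.e. a 10-hour upper bound.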
- name: Wait for aerospike migrations
community.general.aerospike_migrations:
local_only: true
sleep_between_checks: 1
tries_limit: 5
consecutive_good_checks: 3
fail_on_cluster_change: true
min_cluster_size: 3
target_cluster_size: 4
register: migrations_check
until: migrations_check is succeeded
changed_when: false
delay: 60
retries: 120
- name: Another thing
ansible.builtin.shell: |
echo foo
- name: Reboot
ansible.builtin.reboot:
'''
RETURN = '''
# Returns only a success/failure result. Changed is always false.
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
LIB_FOUND_ERR = None
try:
import aerospike
from time import sleep
import re
except ImportError as ie:
LIB_FOUND = False
LIB_FOUND_ERR = traceback.format_exc()
else:
LIB_FOUND = True
def run_module():
"""run ansible module"""
module_args = dict(
host=dict(type='str', required=False, default='localhost'),
port=dict(type='int', required=False, default=3000),
connect_timeout=dict(type='int', required=False, default=1000),
consecutive_good_checks=dict(type='int', required=False, default=3),
sleep_between_checks=dict(type='int', required=False, default=60),
tries_limit=dict(type='int', required=False, default=300),
local_only=dict(type='bool', required=True),
min_cluster_size=dict(type='int', required=False, default=1),
target_cluster_size=dict(type='int', required=False, default=None),
fail_on_cluster_change=dict(type='bool', required=False, default=True),
migrate_tx_key=dict(type='str', required=False, no_log=False,
default="migrate_tx_partitions_remaining"),
migrate_rx_key=dict(type='str', required=False, no_log=False,
default="migrate_rx_partitions_remaining")
)
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if not LIB_FOUND:
module.fail_json(msg=missing_required_lib('aerospike'),
exception=LIB_FOUND_ERR)
try:
if module.check_mode:
has_migrations, skip_reason = False, None
else:
migrations = Migrations(module)
has_migrations, skip_reason = migrations.has_migs(
module.params['local_only']
)
if has_migrations:
module.fail_json(msg="Failed.", skip_reason=skip_reason)
except Exception as e:
module.fail_json(msg="Error: {0}".format(e))
module.exit_json(**result)
class Migrations:
""" Check or wait for migrations between nodes """
def __init__(self, module):
self.module = module
self._client = self._create_client().connect()
self._nodes = {}
self._update_nodes_list()
self._cluster_statistics = {}
self._update_cluster_statistics()
self._namespaces = set()
self._update_cluster_namespace_list()
self._build_list = set()
self._update_build_list()
self._start_cluster_key = \
self._cluster_statistics[self._nodes[0]]['cluster_key']
def _create_client(self):
""" TODO: add support for auth, tls, and other special features
I won't use those features, so I'll wait until somebody complains
or does it for me (Cross fingers)
create the client object"""
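        # Untested sketch for the TODO above: the aerospike Python client
        # accepts credentials via .connect(user, password) and a
        # config['tls'] dict (e.g. {'enable': True}) for TLS, should those
        # features ever be needed here.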
config = {
'hosts': [
(self.module.params['host'], self.module.params['port'])
],
'policies': {
'timeout': self.module.params['connect_timeout']
}
}
return aerospike.client(config)
def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
"""delimiter is for separate stats that come back, NOT for kv
separation which is ="""
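        # Illustrative round trip (hypothetical output): 'statistics' may
        # return "statistics\tcluster_key=ABC;cluster_size=3\n", which this
        # helper turns into {'cluster_key': 'ABC', 'cluster_size': '3'}.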
if node is None: # If no node passed, use the first one (local)
node = self._nodes[0]
data = self._client.info_node(cmd, node)
data = data.split("\t")
if len(data) != 1 and len(data) != 2:
self.module.fail_json(
msg="Unexpected number of values returned in info command: " +
str(len(data))
)
        # data will be in format 'command\toutput'
data = data[-1]
data = data.rstrip("\n\r")
data_arr = data.split(delimiter)
# some commands don't return in kv format
        # so we don't want a dict from those.
if '=' in data:
retval = dict(
metric.split("=", 1) for metric in data_arr
)
else:
# if only 1 element found, and not kv, return just the value.
if len(data_arr) == 1:
retval = data_arr[0]
else:
retval = data_arr
return retval
def _update_build_list(self):
"""creates self._build_list which is a unique list
of build versions."""
self._build_list = set()
for node in self._nodes:
build = self._info_cmd_helper('build', node)
self._build_list.add(build)
# just checks to see if the version is 4.3 or greater
def _can_use_cluster_stable(self):
# if version <4.3 we can't use cluster-stable info cmd
# regex hack to check for versions beginning with 0-3 or
# beginning with 4.0,4.1,4.2
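        # e.g. "3.15.1" and "4.2.0" match (too old to use cluster-stable),
        # while "4.3.0" and "5.1.0" do not match (OK to use it).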
if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
return False
return True
def _update_cluster_namespace_list(self):
""" make a unique list of namespaces
TODO: does this work on a rolling namespace add/deletion?
thankfully if it doesn't, we dont need this on builds >=4.3"""
self._namespaces = set()
for node in self._nodes:
namespaces = self._info_cmd_helper('namespaces', node)
for namespace in namespaces:
self._namespaces.add(namespace)
def _update_cluster_statistics(self):
"""create a dict of nodes with their related stats """
self._cluster_statistics = {}
for node in self._nodes:
self._cluster_statistics[node] = \
self._info_cmd_helper('statistics', node)
def _update_nodes_list(self):
"""get a fresh list of all the nodes"""
self._nodes = self._client.get_nodes()
if not self._nodes:
self.module.fail_json("Failed to retrieve at least 1 node.")
def _namespace_has_migs(self, namespace, node=None):
"""returns a True or False.
Does the namespace have migrations for the node passed?
If no node passed, uses the local node or the first one in the list"""
namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
try:
namespace_tx = \
int(namespace_stats[self.module.params['migrate_tx_key']])
namespace_rx = \
int(namespace_stats[self.module.params['migrate_rx_key']])
except KeyError:
self.module.fail_json(
msg="Did not find partition remaining key:" +
self.module.params['migrate_tx_key'] +
" or key:" +
self.module.params['migrate_rx_key'] +
" in 'namespace/" +
namespace +
"' output."
)
except TypeError:
self.module.fail_json(
msg="namespace stat returned was not numerical"
)
return namespace_tx != 0 or namespace_rx != 0
def _node_has_migs(self, node=None):
"""just calls namespace_has_migs and
if any namespace has migs returns true"""
migs = 0
self._update_cluster_namespace_list()
for namespace in self._namespaces:
if self._namespace_has_migs(namespace, node):
migs += 1
return migs != 0
def _cluster_key_consistent(self):
"""create a dictionary to store what each node
returns the cluster key as. we should end up with only 1 dict key,
with the key being the cluster key."""
cluster_keys = {}
for node in self._nodes:
cluster_key = self._cluster_statistics[node][
'cluster_key']
if cluster_key not in cluster_keys:
cluster_keys[cluster_key] = 1
else:
cluster_keys[cluster_key] += 1
if len(cluster_keys.keys()) == 1 and \
self._start_cluster_key in cluster_keys:
return True
return False
def _cluster_migrates_allowed(self):
"""ensure all nodes have 'migrate_allowed' in their stats output"""
for node in self._nodes:
node_stats = self._info_cmd_helper('statistics', node)
allowed = node_stats['migrate_allowed']
if allowed == "false":
return False
return True
def _cluster_has_migs(self):
"""calls node_has_migs for each node"""
migs = 0
for node in self._nodes:
if self._node_has_migs(node):
migs += 1
if migs == 0:
return False
return True
def _has_migs(self, local):
if local:
return self._local_node_has_migs()
return self._cluster_has_migs()
def _local_node_has_migs(self):
return self._node_has_migs(None)
def _is_min_cluster_size(self):
"""checks that all nodes in the cluster are returning the
minimum cluster size specified in their statistics output"""
sizes = set()
for node in self._cluster_statistics:
sizes.add(int(self._cluster_statistics[node]['cluster_size']))
if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no
return False
if (min(sizes)) >= self.module.params['min_cluster_size']:
return True
return False
def _cluster_stable(self):
"""Added 4.3:
cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
Returns the current 'cluster_key' when the following are satisfied:
If 'size' is specified then the target node's 'cluster-size'
must match size.
If 'ignore-migrations' is either unspecified or 'false' then
the target node's migrations counts must be zero for the provided
'namespace' or all namespaces if 'namespace' is not provided."""
cluster_key = set()
cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
cmd = "cluster-stable:"
target_cluster_size = self.module.params['target_cluster_size']
if target_cluster_size is not None:
cmd = cmd + "size=" + str(target_cluster_size) + ";"
for node in self._nodes:
try:
cluster_key.add(self._info_cmd_helper(cmd, node))
            except aerospike.exception.ServerError as e:  # unstable-cluster is returned in the form of an exception
if 'unstable-cluster' in e.msg:
return False
raise e
if len(cluster_key) == 1:
return True
return False
def _cluster_good_state(self):
"""checks a few things to make sure we're OK to say the cluster
has no migs. It could be in a unhealthy condition that does not allow
migs, or a split brain"""
if self._cluster_key_consistent() is not True:
return False, "Cluster key inconsistent."
if self._is_min_cluster_size() is not True:
return False, "Cluster min size not reached."
if self._cluster_migrates_allowed() is not True:
return False, "migrate_allowed is false somewhere."
return True, "OK."
def has_migs(self, local=True):
"""returns a boolean, False if no migrations otherwise True"""
consecutive_good = 0
try_num = 0
skip_reason = list()
while \
try_num < int(self.module.params['tries_limit']) and \
consecutive_good < \
int(self.module.params['consecutive_good_checks']):
self._update_nodes_list()
self._update_cluster_statistics()
            # These checks do not fail the module outright because
            # we probably want to skip & sleep instead of failing entirely
stable, reason = self._cluster_good_state()
if stable is not True:
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + reason
)
else:
if self._can_use_cluster_stable():
if self._cluster_stable():
consecutive_good += 1
else:
consecutive_good = 0
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + " cluster_stable"
)
elif self._has_migs(local):
# print("_has_migs")
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + " migrations"
)
consecutive_good = 0
else:
consecutive_good += 1
if consecutive_good == self.module.params[
'consecutive_good_checks']:
break
try_num += 1
sleep(self.module.params['sleep_between_checks'])
# print(skip_reason)
if consecutive_good == self.module.params['consecutive_good_checks']:
return False, None
return True, skip_reason
def main():
"""main method for ansible module"""
run_module()
if __name__ == '__main__':
main()


@ -0,0 +1,169 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: airbrake_deployment
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Patrick Humpal (@phumpal)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
project_id:
description:
      - Airbrake PROJECT_ID.
required: true
type: str
version_added: '0.2.0'
project_key:
description:
- Airbrake PROJECT_KEY.
required: true
type: str
version_added: '0.2.0'
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
type: str
user:
description:
      - The username of the person doing the deployment.
required: false
type: str
repo:
description:
      - URL of the project repository.
required: false
type: str
revision:
description:
      - A hash, number, tag, or other identifier showing what revision from version control was deployed.
required: false
type: str
version:
description:
      - A string identifying what version was deployed.
required: false
type: str
version_added: '1.0.0'
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://api.airbrake.io/api/v4/projects/"
type: str
validate_certs:
description:
- If V(false), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: true
type: bool
requirements: []
'''
EXAMPLES = '''
- name: Notify airbrake about an app deployment
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: '4.2'
- name: Notify airbrake about an app deployment, using git hash as revision
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
version: '0.2.0'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
project_id=dict(required=True, no_log=True, type='str'),
project_key=dict(required=True, no_log=True, type='str'),
environment=dict(required=True, type='str'),
user=dict(required=False, type='str'),
repo=dict(required=False, type='str'),
revision=dict(required=False, type='str'),
version=dict(required=False, type='str'),
url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True,
)
    # Build dict of request params
params = {}
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
# v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
if module.params["environment"]:
params["environment"] = module.params["environment"]
if module.params["user"]:
params["username"] = module.params["user"]
if module.params["repo"]:
params["repository"] = module.params["repo"]
if module.params["revision"]:
params["revision"] = module.params["revision"]
if module.params["version"]:
params["version"] = module.params["version"]
# Build deploy url
url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
json_body = module.jsonify(params)
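    # Illustrative request body: with environment=staging and user=ansible,
    # json_body is '{"environment": "staging", "username": "ansible"}'
    # (key order may vary).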
# Build header
headers = {'Content-Type': 'application/json'}
# Notify Airbrake of deploy
response, info = fetch_url(module, url, data=json_body,
headers=headers, method='POST')
    if info['status'] in (200, 201):
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
if __name__ == '__main__':
main()


@ -0,0 +1,377 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_devices
short_description: Manages AIX devices
description:
- This module discovers, defines, removes and modifies attributes of AIX devices.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
attributes:
description:
- A list of device attributes.
type: dict
device:
description:
- The name of the device.
- V(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
type: str
force:
description:
- Forces action.
type: bool
default: false
recursive:
description:
- Removes or defines a device and children devices.
type: bool
default: false
state:
description:
- Controls the device state.
      - V(available) (alias V(present)) rescans a specific device or all devices (when O(device) is not specified).
      - V(removed) (alias V(absent)) removes a device.
      - V(defined) changes the device to the Defined state.
type: str
choices: [ available, defined, removed ]
default: available
'''
EXAMPLES = r'''
- name: Scan new devices
community.general.aix_devices:
device: all
state: available
- name: Scan new virtual devices (vio0)
community.general.aix_devices:
device: vio0
state: available
- name: Removing IP alias to en0
community.general.aix_devices:
device: en0
attributes:
delalias4: 10.0.0.100,255.255.255.0
- name: Removes ent2
community.general.aix_devices:
device: ent2
state: removed
- name: Put device en2 in Defined
community.general.aix_devices:
device: en2
state: defined
- name: Removes ent4 (nonexistent).
community.general.aix_devices:
device: ent4
state: removed
- name: Put device en4 in Defined (nonexistent)
community.general.aix_devices:
device: en4
state: defined
- name: Put vscsi1 and children devices in Defined state.
community.general.aix_devices:
device: vscsi1
recursive: true
state: defined
- name: Removes vscsi1 and children devices.
community.general.aix_devices:
device: vscsi1
recursive: true
state: removed
- name: Changes en1 mtu to 9000 and disables arp.
community.general.aix_devices:
device: en1
attributes:
      mtu: 9000
arp: 'off'
state: available
- name: Configure IP, netmask and set en1 up.
community.general.aix_devices:
device: en1
attributes:
netaddr: 192.168.0.100
netmask: 255.255.255.0
state: up
state: available
- name: Adding IP alias to en0
community.general.aix_devices:
device: en0
attributes:
alias4: 10.0.0.100,255.255.255.0
state: available
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
def _check_device(module, device):
"""
Check if device already exists and the state.
Args:
module: Ansible module.
device: device to be checked.
Returns: bool, device state
"""
lsdev_cmd = module.get_bin_path('lsdev', True)
    rc, lsdev_out, err = module.run_command([lsdev_cmd, '-C', '-l', device])
if rc != 0:
module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
if lsdev_out:
device_state = lsdev_out.split()[1]
return True, device_state
device_state = None
return False, device_state
def _check_device_attr(module, device, attr):
"""
Args:
module: Ansible module.
device: device to check attributes.
attr: attribute to be checked.
Returns:
"""
lsattr_cmd = module.get_bin_path('lsattr', True)
    rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', attr])
hidden_attrs = ['delalias4', 'delalias6']
if rc == 255:
if attr in hidden_attrs:
current_param = ''
else:
current_param = None
return current_param
elif rc != 0:
module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
current_param = lsattr_out.split()[1]
return current_param
def discover_device(module, device):
    """ Discover AIX devices."""
    cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
    cmd = [cfgmgr_cmd]
    # cfgmgr -l <device> scans a specific device; a bare cfgmgr (or
    # device 'all') rescans everything.
    if device is not None and device != 'all':
        cmd += ['-l', device]
    changed = True
    msg = ''
    if not module.check_mode:
        rc, cfgmgr_out, err = module.run_command(cmd)
        changed = True
        msg = cfgmgr_out
    return changed, msg
def change_device_attr(module, attributes, device, force):
""" Change AIX device attribute. """
attr_changed = []
attr_not_changed = []
attr_invalid = []
chdev_cmd = module.get_bin_path('chdev', True)
for attr in list(attributes.keys()):
new_param = attributes[attr]
current_param = _check_device_attr(module, device, attr)
if current_param is None:
attr_invalid.append(attr)
        elif current_param != new_param:
            cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, attributes[attr])]
            if force:
                # force arrives pre-formatted as '-f' (or '') from main().
                cmd.append(force)
            if not module.check_mode:
                rc, chdev_out, err = module.run_command(cmd)
                if rc != 0:
                    module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)
attr_changed.append(attributes[attr])
else:
attr_not_changed.append(attributes[attr])
if len(attr_changed) > 0:
changed = True
attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
else:
changed = False
attr_changed_msg = ''
if len(attr_not_changed) > 0:
attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
else:
attr_not_changed_msg = ''
if len(attr_invalid) > 0:
attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
else:
attr_invalid_msg = ''
msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
return changed, msg
def remove_device(module, device, force, recursive, state):
""" Puts device in defined state or removes device. """
state_opt = {
'removed': '-d',
'absent': '-d',
'defined': ''
}
recursive_opt = {
True: '-R',
False: ''
}
    recursive = recursive_opt[recursive]
    state = state_opt[state]
    changed = True
    msg = ''
    rmdev_cmd = module.get_bin_path('rmdev', True)
    if not module.check_mode:
        # Drop empty flags so no blank argv entries reach rmdev.
        cmd = [c for c in [rmdev_cmd, '-l', device, state, recursive, force] if c]
        rc, rmdev_out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
msg = rmdev_out
return changed, msg
def main():
module = AnsibleModule(
argument_spec=dict(
attributes=dict(type='dict'),
device=dict(type='str'),
force=dict(type='bool', default=False),
recursive=dict(type='bool', default=False),
state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
),
supports_check_mode=True,
)
force_opt = {
True: '-f',
False: '',
}
attributes = module.params['attributes']
device = module.params['device']
force = force_opt[module.params['force']]
recursive = module.params['recursive']
state = module.params['state']
result = dict(
changed=False,
msg='',
)
if state == 'available' or state == 'present':
if attributes:
# change attributes on device
device_status, device_state = _check_device(module, device)
if device_status:
result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
else:
result['msg'] = "Device %s does not exist." % device
else:
# discovery devices (cfgmgr)
if device and device != 'all':
device_status, device_state = _check_device(module, device)
if device_status:
# run cfgmgr on specific device
result['changed'], result['msg'] = discover_device(module, device)
else:
result['msg'] = "Device %s does not exist." % device
else:
result['changed'], result['msg'] = discover_device(module, device)
elif state == 'removed' or state == 'absent' or state == 'defined':
if not device:
result['msg'] = "device is required to removed or defined state."
else:
# Remove device
check_device, device_state = _check_device(module, device)
if check_device:
if state == 'defined' and device_state == 'Defined':
result['changed'] = False
result['msg'] = 'Device %s already in Defined' % device
else:
result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
else:
result['msg'] = "Device %s does not exist." % device
else:
result['msg'] = "Unexpected state %s." % state
module.fail_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,619 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_filesystem
short_description: Configure LVM and NFS file systems for AIX
description:
  - This module creates, removes, mounts and unmounts LVM and NFS file
    systems for AIX using C(/etc/filesystems).
  - For LVM file systems, it is possible to resize a file system.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
account_subsystem:
description:
- Specifies whether the file system is to be processed by the accounting subsystem.
type: bool
default: false
attributes:
description:
      - Specifies attributes for the file system, separated by commas.
type: list
elements: str
default:
- agblksize=4096
- isnapshot=no
auto_mount:
description:
- File system is automatically mounted at system restart.
type: bool
default: true
device:
description:
- Logical volume (LV) device name or remote export device to create a NFS file system.
- It is used to create a file system on an already existing logical volume or the exported NFS file system.
      - If not mentioned, a new logical volume name will be created, following AIX standards (LVM).
type: str
fs_type:
description:
- Specifies the virtual file system type.
type: str
default: jfs2
permissions:
description:
- Set file system permissions. V(rw) (read-write) or V(ro) (read-only).
type: str
choices: [ ro, rw ]
default: rw
mount_group:
description:
- Specifies the mount group.
type: str
filesystem:
description:
- Specifies the mount point, which is the directory where the file system will be mounted.
type: str
required: true
nfs_server:
description:
- Specifies a Network File System (NFS) server.
type: str
rm_mount_point:
description:
- Removes the mount point directory when used with state V(absent).
type: bool
default: false
size:
description:
- Specifies the file system size.
      - If the file system is already V(present), it will be resized.
      - Accepts 512-byte blocks, megabytes or gigabytes. If the value has M
        specified, it will be in megabytes; if it has G specified, it will be
        in gigabytes.
      - If neither M nor G is specified, the value is in 512-byte blocks.
      - If the value begins with "+", the size is added to the current size.
      - If the value begins with "-", the size is subtracted from the current size.
      - If neither "+" nor "-" is specified, the value is the new total size.
      - The size respects the AIX LVM standards.
type: str
state:
description:
- Controls the file system state.
      - V(present) checks if the file system exists, and creates or resizes it.
      - V(absent) removes the existing file system if it is already unmounted.
      - V(mounted) checks if the file system is mounted, and mounts it if not.
      - V(unmounted) checks if the file system is unmounted, and unmounts it if not.
type: str
choices: [ absent, mounted, present, unmounted ]
default: present
vg:
description:
- Specifies an existing volume group (VG).
type: str
notes:
  - For more O(attributes), please check the AIX C(crfs) manual.
'''
EXAMPLES = r'''
- name: Create filesystem in a previously defined logical volume.
community.general.aix_filesystem:
device: testlv
filesystem: /testfs
state: present
- name: Creating NFS filesystem from nfshost.
community.general.aix_filesystem:
device: /home/ftp
nfs_server: nfshost
filesystem: /home/ftp
state: present
- name: Creating a new file system without a previously defined logical volume.
community.general.aix_filesystem:
filesystem: /newfs
size: 1G
state: present
vg: datavg
- name: Unmounting /testfs.
community.general.aix_filesystem:
filesystem: /testfs
state: unmounted
- name: Resizing /mksysb to +512M.
community.general.aix_filesystem:
filesystem: /mksysb
size: +512M
state: present
- name: Resizing /mksysb to 11G.
community.general.aix_filesystem:
filesystem: /mksysb
size: 11G
state: present
- name: Resizing /mksysb to -2G.
community.general.aix_filesystem:
filesystem: /mksysb
size: -2G
state: present
- name: Remove NFS filesystem /home/ftp.
community.general.aix_filesystem:
filesystem: /home/ftp
rm_mount_point: true
state: absent
- name: Remove /newfs.
community.general.aix_filesystem:
filesystem: /newfs
rm_mount_point: true
state: absent
'''
RETURN = r'''
changed:
description: Return changed for aix_filesystems actions as true or false.
returned: always
type: bool
msg:
description: Return message regarding the action.
returned: always
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._mount import ismount
import re
def _fs_exists(module, filesystem):
"""
Check if file system already exists on /etc/filesystems.
:param module: Ansible module.
    :param filesystem: filesystem name.
:return: True or False.
"""
lsfs_cmd = module.get_bin_path('lsfs', True)
rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem])
if rc == 1:
if re.findall("No record matching", err):
return False
else:
module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
else:
return True
def _check_nfs_device(module, nfs_host, device):
"""
Validate if NFS server is exporting the device (remote export).
:param module: Ansible module.
:param nfs_host: nfs_host parameter, NFS server.
:param device: device parameter, remote export.
:return: True or False.
"""
showmount_cmd = module.get_bin_path('showmount', True)
rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host])
if rc != 0:
module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
else:
showmount_data = showmount_out.splitlines()
for line in showmount_data:
if line.split(':')[1] == device:
return True
return False
def _validate_vg(module, vg):
"""
Check the current state of volume group.
:param module: Ansible module argument spec.
:param vg: Volume Group name.
:return: True (VG in varyon state) or False (VG in varyoff state) or
None (VG does not exist), message.
"""
lsvg_cmd = module.get_bin_path('lsvg', True)
rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"])
if rc != 0:
module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
rc, current_all_vgs, err = module.run_command([lsvg_cmd])
if rc != 0:
module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
if vg in current_all_vgs and vg not in current_active_vgs:
msg = "Volume group %s is in varyoff state." % vg
return False, msg
elif vg in current_active_vgs:
msg = "Volume group %s is in varyon state." % vg
return True, msg
else:
msg = "Volume group %s does not exist." % vg
return None, msg
def resize_fs(module, filesystem, size):
""" Resize LVM file system. """
chfs_cmd = module.get_bin_path('chfs', True)
if not module.check_mode:
rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem])
if rc == 28:
changed = False
return changed, chfs_out
elif rc != 0:
if re.findall('Maximum allocation for logical', err):
changed = False
return changed, err
else:
module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
else:
if re.findall('The filesystem size is already', chfs_out):
changed = False
else:
changed = True
return changed, chfs_out
else:
changed = True
msg = ''
return changed, msg
def create_fs(
module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
account_subsystem, permissions, nfs_server, attributes):
""" Create LVM file system or NFS remote mount point. """
attributes = ' -a '.join(attributes)
# Parameters definition.
account_subsys_opt = {
True: '-t yes',
False: '-t no'
}
if nfs_server is not None:
auto_mount_opt = {
True: '-A',
False: '-a'
}
else:
auto_mount_opt = {
True: '-A yes',
False: '-A no'
}
if size is None:
size = ''
else:
size = "-a size=%s" % size
if device is None:
device = ''
else:
device = "-d %s" % device
if vg is None:
vg = ''
else:
vg_state, msg = _validate_vg(module, vg)
if vg_state:
vg = "-g %s" % vg
else:
changed = False
return changed, msg
if mount_group is None:
mount_group = ''
else:
mount_group = "-u %s" % mount_group
auto_mount = auto_mount_opt[auto_mount]
account_subsystem = account_subsys_opt[account_subsystem]
if nfs_server is not None:
# Creates a NFS file system.
mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
if not module.check_mode:
rc, mknfsmnt_out, err = module.run_command([mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"])
if rc != 0:
module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
else:
changed = True
msg = "NFS file system %s created." % filesystem
return changed, msg
else:
changed = True
msg = ''
return changed, msg
else:
# Creates a LVM file system.
crfs_cmd = module.get_bin_path('crfs', True)
if not module.check_mode:
cmd = [crfs_cmd]
cmd.append("-v")
cmd.append(fs_type)
if vg:
(flag, value) = vg.split()
cmd.append(flag)
cmd.append(value)
if device:
(flag, value) = device.split()
cmd.append(flag)
cmd.append(value)
cmd.append("-m")
cmd.append(filesystem)
if mount_group:
(flag, value) = mount_group.split()
cmd.append(flag)
cmd.append(value)
if auto_mount:
(flag, value) = auto_mount.split()
cmd.append(flag)
cmd.append(value)
if account_subsystem:
(flag, value) = account_subsystem.split()
cmd.append(flag)
cmd.append(value)
cmd.append("-p")
cmd.append(permissions)
if size:
(flag, value) = size.split()
cmd.append(flag)
cmd.append(value)
if attributes:
splitted_attributes = attributes.split()
cmd.append("-a")
for value in splitted_attributes:
cmd.append(value)
rc, crfs_out, err = module.run_command(cmd)
if rc == 10:
module.exit_json(
msg="Using a existent previously defined logical volume, "
"volume group needs to be empty. %s" % err)
elif rc != 0:
module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
else:
changed = True
return changed, crfs_out
else:
changed = True
msg = ''
return changed, msg
def remove_fs(module, filesystem, rm_mount_point):
""" Remove an LVM file system or NFS entry. """
# Command parameters.
rm_mount_point_opt = {
True: '-r',
False: ''
}
rm_mount_point = rm_mount_point_opt[rm_mount_point]
rmfs_cmd = module.get_bin_path('rmfs', True)
if not module.check_mode:
cmd = [rmfs_cmd, "-r", rm_mount_point, filesystem]
rc, rmfs_out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
else:
changed = True
msg = rmfs_out
if not rmfs_out:
msg = "File system %s removed." % filesystem
return changed, msg
else:
changed = True
msg = ''
return changed, msg
def mount_fs(module, filesystem):
""" Mount a file system. """
mount_cmd = module.get_bin_path('mount', True)
if not module.check_mode:
rc, mount_out, err = module.run_command([mount_cmd, filesystem])
if rc != 0:
module.fail_json(msg="Failed to run mount. Error message: %s" % err)
else:
changed = True
msg = "File system %s mounted." % filesystem
return changed, msg
else:
changed = True
msg = ''
return changed, msg
def unmount_fs(module, filesystem):
""" Unmount a file system."""
unmount_cmd = module.get_bin_path('unmount', True)
if not module.check_mode:
rc, unmount_out, err = module.run_command([unmount_cmd, filesystem])
if rc != 0:
module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
else:
changed = True
msg = "File system %s unmounted." % filesystem
return changed, msg
else:
changed = True
msg = ''
return changed, msg
def main():
module = AnsibleModule(
argument_spec=dict(
account_subsystem=dict(type='bool', default=False),
attributes=dict(type='list', elements='str', default=["agblksize=4096", "isnapshot=no"]),
auto_mount=dict(type='bool', default=True),
device=dict(type='str'),
filesystem=dict(type='str', required=True),
fs_type=dict(type='str', default='jfs2'),
permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
mount_group=dict(type='str'),
nfs_server=dict(type='str'),
rm_mount_point=dict(type='bool', default=False),
size=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
vg=dict(type='str'),
),
supports_check_mode=True,
)
account_subsystem = module.params['account_subsystem']
attributes = module.params['attributes']
auto_mount = module.params['auto_mount']
device = module.params['device']
fs_type = module.params['fs_type']
permissions = module.params['permissions']
mount_group = module.params['mount_group']
filesystem = module.params['filesystem']
nfs_server = module.params['nfs_server']
rm_mount_point = module.params['rm_mount_point']
size = module.params['size']
state = module.params['state']
vg = module.params['vg']
result = dict(
changed=False,
msg='',
)
if state == 'present':
fs_mounted = ismount(filesystem)
fs_exists = _fs_exists(module, filesystem)
# Check if fs is mounted or exists.
if fs_mounted or fs_exists:
result['msg'] = "File system %s already exists." % filesystem
result['changed'] = False
# If parameter size was passed, resize fs.
if size is not None:
result['changed'], result['msg'] = resize_fs(module, filesystem, size)
# If fs doesn't exist, create it.
else:
# Check if fs will be a NFS device.
if nfs_server is not None:
if device is None:
result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
module.fail_json(**result)
else:
# Create a fs from NFS export.
if _check_nfs_device(module, nfs_server, device):
result['changed'], result['msg'] = create_fs(
module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
if device is None:
if vg is None:
result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
module.fail_json(**result)
else:
# Create a fs from
result['changed'], result['msg'] = create_fs(
module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
if device is not None and nfs_server is None:
# Create a fs from a previously lv device.
result['changed'], result['msg'] = create_fs(
module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
elif state == 'absent':
if ismount(filesystem):
result['msg'] = "File system %s mounted." % filesystem
else:
fs_status = _fs_exists(module, filesystem)
if not fs_status:
result['msg'] = "File system %s does not exist." % filesystem
else:
result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
elif state == 'mounted':
if ismount(filesystem):
result['changed'] = False
result['msg'] = "File system %s already mounted." % filesystem
else:
result['changed'], result['msg'] = mount_fs(module, filesystem)
elif state == 'unmounted':
if not ismount(filesystem):
result['changed'] = False
result['msg'] = "File system %s already unmounted." % filesystem
else:
result['changed'], result['msg'] = unmount_fs(module, filesystem)
else:
# Unreachable codeblock
result['msg'] = "Unexpected state %s." % state
module.fail_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,255 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Joris Weijters <joris.weijters@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- Joris Weijters (@molekuul)
module: aix_inittab
short_description: Manages the inittab on AIX
description:
- Manages the inittab on AIX.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- Name of the inittab entry.
type: str
required: true
aliases: [ service ]
runlevel:
description:
- Runlevel of the entry.
type: str
required: true
action:
description:
      - Action that init has to take with this entry.
type: str
choices:
- boot
- bootwait
- hold
- initdefault
- 'off'
- once
- ondemand
- powerfail
- powerwait
- respawn
- sysinit
- wait
command:
description:
- What command has to run.
type: str
required: true
insertafter:
description:
      - The inittab entry after which the new entry should be inserted.
type: str
state:
description:
- Whether the entry should be present or absent in the inittab file.
type: str
choices: [ absent, present ]
default: present
notes:
- The changes are persistent across reboots.
- You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
- Tested on AIX 7.1.
requirements:
- itertools
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
community.general.aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: echo hello
insertafter: existingservice
state: present
become: true
# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice to inittab
community.general.aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: echo hello
state: present
become: true
- name: Remove startmyservice from inittab
community.general.aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: echo hello
state: absent
become: true
'''
RETURN = '''
name:
description: Name of the adjusted inittab entry
returned: always
type: str
sample: startmyservice
msg:
description: Action done with the inittab entry
returned: changed
type: str
sample: changed inittab entry startmyservice
changed:
description: Whether the inittab changed or not
returned: always
type: bool
sample: true
'''
# Import necessary libraries
try:
# python 2
from itertools import izip
except ImportError:
izip = zip
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
# Check if entry exists, if not return False in exists in return dict,
# if true return True and the entry in return dict
existsdict = {'exist': False}
lsitab = module.get_bin_path('lsitab')
(rc, out, err) = module.run_command([lsitab, module.params['name']])
if rc == 0:
keys = ('name', 'runlevel', 'action', 'command')
values = out.split(":")
        # strip non-printable characters such as \n
values = map(lambda s: s.strip(), values)
existsdict = dict(izip(keys, values))
existsdict.update({'exist': True})
return existsdict
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['service']),
runlevel=dict(type='str', required=True),
action=dict(type='str', choices=[
'boot',
'bootwait',
'hold',
'initdefault',
'off',
'once',
'ondemand',
'powerfail',
'powerwait',
'respawn',
'sysinit',
'wait',
]),
command=dict(type='str', required=True),
insertafter=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
supports_check_mode=True,
)
result = {
'name': module.params['name'],
'changed': False,
'msg': ""
}
# Find commandline strings
mkitab = module.get_bin_path('mkitab')
rmitab = module.get_bin_path('rmitab')
chitab = module.get_bin_path('chitab')
rc = 0
# check if the new entry exists
current_entry = check_current_entry(module)
# if action is install or change,
if module.params['state'] == 'present':
# create new entry string
new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
":" + module.params['action'] + ":" + module.params['command']
        # If the entry does not exist, or an existing entry's fields differ,
        # the entry will be created or changed.
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
module.params['command'] != current_entry['command']):
# If the entry does exist then change the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command([chitab, new_entry])
if rc != 0:
module.fail_json(
msg="could not change inittab", rc=rc, err=err)
result['msg'] = "changed inittab entry" + " " + current_entry['name']
result['changed'] = True
# If the entry does not exist create the entry
elif not current_entry['exist']:
if module.params['insertafter']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, '-i', module.params['insertafter'], new_entry])
else:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, new_entry])
if rc != 0:
module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
result['msg'] = "add inittab entry" + " " + module.params['name']
result['changed'] = True
elif module.params['state'] == 'absent':
# If the action is remove and the entry exists then remove the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[rmitab, module.params['name']])
if rc != 0:
module.fail_json(
msg="could not remove entry from inittab)", rc=rc, err=err)
result['msg'] = "removed inittab entry" + " " + current_entry['name']
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,371 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_lvg
short_description: Manage LVM volume groups on AIX
description:
  - This module creates, removes or resizes volume groups on AIX LVM.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
force:
description:
- Force volume group creation.
type: bool
default: false
pp_size:
description:
- The size of the physical partition in megabytes.
type: int
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- Required when creating or extending (V(present) state) the volume group.
      - If not informed when reducing (V(absent) state), the whole volume group will be removed.
type: list
elements: str
state:
description:
- Control if the volume group exists and volume group AIX state varyonvg V(varyon) or varyoffvg V(varyoff).
type: str
choices: [ absent, present, varyoff, varyon ]
default: present
vg:
description:
- The name of the volume group.
type: str
required: true
vg_type:
description:
- The type of the volume group.
type: str
choices: [ big, normal, scalable ]
default: normal
notes:
  - AIX permits removing a volume group only when all LVs/filesystems are not busy.
  - The module does not modify the PP size of an already present volume group.
'''
EXAMPLES = r'''
- name: Create a volume group datavg
community.general.aix_lvg:
vg: datavg
pp_size: 128
vg_type: scalable
state: present
- name: Removing a volume group datavg
community.general.aix_lvg:
vg: datavg
state: absent
- name: Extending rootvg
community.general.aix_lvg:
vg: rootvg
pvs: hdisk1
state: present
- name: Reducing rootvg
community.general.aix_lvg:
vg: rootvg
pvs: hdisk1
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
def _validate_pv(module, vg, pvs):
"""
Function to validate if the physical volume (PV) is not already in use by
another volume group or Oracle ASM.
:param module: Ansible module argument spec.
:param vg: Volume group name.
:param pvs: Physical volume list.
:return: [bool, message] or module.fail_json for errors.
"""
lspv_cmd = module.get_bin_path('lspv', True)
rc, current_lspv, stderr = module.run_command([lspv_cmd])
if rc != 0:
module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
    # Build the pv -> owning-VG map once from the lspv output.
    lspv_list = {}
    for line in current_lspv.splitlines():
        pv_data = line.split()
        lspv_list[pv_data[0]] = pv_data[2]
    for pv in pvs:
# Check if pv exists and is free.
if pv not in lspv_list.keys():
module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
if lspv_list[pv] == 'None':
# Disk None, looks free.
# Check if PV is not already in use by Oracle ASM.
lquerypv_cmd = module.get_bin_path('lquerypv', True)
rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"])
if rc != 0:
module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
if 'ORCLDISK' in current_lquerypv:
module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv)
msg = "Physical volume '%s' is ok to be used." % pv
return True, msg
# Check if PV is already in use for the same vg.
elif vg != lspv_list[pv]:
module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
return False, msg
def _validate_vg(module, vg):
"""
Check the current state of volume group.
:param module: Ansible module argument spec.
:param vg: Volume Group name.
:return: True (VG in varyon state) or False (VG in varyoff state) or
None (VG does not exist), message.
"""
lsvg_cmd = module.get_bin_path('lsvg', True)
rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"])
if rc != 0:
module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
rc, current_all_vgs, err = module.run_command([lsvg_cmd])
if rc != 0:
module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
if vg in current_all_vgs and vg not in current_active_vgs:
msg = "Volume group '%s' is in varyoff state." % vg
return False, msg
if vg in current_active_vgs:
msg = "Volume group '%s' is in varyon state." % vg
return True, msg
msg = "Volume group '%s' does not exist." % vg
return None, msg
def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
""" Creates or extend a volume group. """
# Command option parameters.
force_opt = {
True: '-f',
False: ''
}
vg_opt = {
'normal': '',
'big': '-B',
'scalable': '-S',
}
# Validate if PV are not already in use.
pv_state, msg = _validate_pv(module, vg, pvs)
if not pv_state:
changed = False
return changed, msg
vg_state, msg = vg_validation
if vg_state is False:
changed = False
return changed, msg
elif vg_state is True:
# Volume group extension.
changed = True
msg = ""
if not module.check_mode:
extendvg_cmd = module.get_bin_path('extendvg', True)
rc, output, err = module.run_command([extendvg_cmd, vg] + pvs)
if rc != 0:
changed = False
msg = "Extending volume group '%s' has failed." % vg
return changed, msg
msg = "Volume group '%s' extended." % vg
return changed, msg
elif vg_state is None:
# Volume group creation.
changed = True
msg = ''
        if not module.check_mode:
            mkvg_cmd = module.get_bin_path('mkvg', True)
            # Split pp_size ('-s N') and drop empty flags so no blank argv entries reach mkvg.
            opts = [o for o in [vg_opt[vg_type], force_opt[force]] if o] + pp_size.split()
            rc, output, err = module.run_command([mkvg_cmd] + opts + ["-y", vg] + pvs)
if rc != 0:
changed = False
msg = "Creating volume group '%s' failed." % vg
return changed, msg
msg = "Volume group '%s' created." % vg
return changed, msg
def reduce_vg(module, vg, pvs, vg_validation):
vg_state, msg = vg_validation
if vg_state is False:
changed = False
return changed, msg
elif vg_state is None:
changed = False
return changed, msg
# Define pvs_to_remove (list of physical volumes to be removed).
if pvs is None:
        # Remove VG if pvs are not informed.
        # Remark: AIX permits removal only if the VG has no LVs.
lsvg_cmd = module.get_bin_path('lsvg', True)
rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg])
if rc != 0:
module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)
pvs_to_remove = []
for line in current_pvs.splitlines()[2:]:
pvs_to_remove.append(line.split()[0])
reduce_msg = "Volume group '%s' removed." % vg
else:
pvs_to_remove = pvs
reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
# Reduce volume group.
if len(pvs_to_remove) <= 0:
changed = False
msg = "No physical volumes to remove."
return changed, msg
changed = True
msg = ''
if not module.check_mode:
reducevg_cmd = module.get_bin_path('reducevg', True)
rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove)
if rc != 0:
module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
msg = reduce_msg
return changed, msg
def state_vg(module, vg, state, vg_validation):
vg_state, msg = vg_validation
if vg_state is None:
module.fail_json(msg=msg)
if state == 'varyon':
if vg_state is True:
changed = False
return changed, msg
changed = True
msg = ''
if not module.check_mode:
varyonvg_cmd = module.get_bin_path('varyonvg', True)
rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg])
if rc != 0:
module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
msg = "Varyon volume group %s completed." % vg
return changed, msg
elif state == 'varyoff':
if vg_state is False:
changed = False
return changed, msg
changed = True
msg = ''
if not module.check_mode:
varyonvg_cmd = module.get_bin_path('varyoffvg', True)
rc, varyonvg_out, stderr = module.run_command([varyonvg_cmd, vg])
if rc != 0:
module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr)
msg = "Varyoff volume group %s completed." % vg
return changed, msg
def main():
module = AnsibleModule(
argument_spec=dict(
force=dict(type='bool', default=False),
pp_size=dict(type='int'),
pvs=dict(type='list', elements='str'),
state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
vg=dict(type='str', required=True),
vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
),
supports_check_mode=True,
)
force = module.params['force']
pp_size = module.params['pp_size']
pvs = module.params['pvs']
state = module.params['state']
vg = module.params['vg']
vg_type = module.params['vg_type']
if pp_size is None:
pp_size = []
else:
# Keep the flag and its value as separate argv entries for run_command.
pp_size = ["-s", str(pp_size)]
vg_validation = _validate_vg(module, vg)
if state == 'present':
if not pvs:
changed = False
msg = "pvs is required to state 'present'."
module.fail_json(msg=msg)
else:
changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
elif state == 'absent':
changed, msg = reduce_vg(module, vg, pvs, vg_validation)
elif state == 'varyon' or state == 'varyoff':
changed, msg = state_vg(module, vg, state, vg_validation)
else:
changed = False
msg = "Unexpected state"
module.exit_json(changed=changed, msg=msg, state=state)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,344 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
- This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
vg:
description:
- The volume group this logical volume is part of.
type: str
required: true
lv:
description:
- The name of the logical volume.
type: str
required: true
lv_type:
description:
- The type of the logical volume.
type: str
default: jfs2
size:
description:
- The size of the logical volume with one of the [MGT] units.
type: str
copies:
description:
- The number of copies of the logical volume.
- The maximum number of copies is 3.
type: int
default: 1
policy:
description:
- Sets the inter-physical volume allocation policy.
- V(maximum) allocates logical partitions across the maximum number of physical volumes.
- V(minimum) allocates logical partitions across the minimum number of physical volumes.
type: str
choices: [ maximum, minimum ]
default: maximum
state:
description:
- Control if the logical volume exists. If V(present) and the
volume does not already exist then the O(size) option is required.
type: str
choices: [ absent, present ]
default: present
opts:
description:
- Free-form options to be passed to the mklv command.
type: str
default: ''
pvs:
description:
- A list of physical volumes, for example V(hdisk1,hdisk2).
type: list
elements: str
default: []
'''
EXAMPLES = r'''
- name: Create a logical volume of 512M
community.general.aix_lvol:
vg: testvg
lv: testlv
size: 512M
- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
community.general.aix_lvol:
vg: testvg
lv: test2lv
size: 512M
pvs: [ hdisk1, hdisk2 ]
- name: Create a logical volume of 512M mirrored
community.general.aix_lvol:
vg: testvg
lv: test3lv
size: 512M
copies: 2
- name: Create a logical volume of 1G with a minimum placement policy
community.general.aix_lvol:
vg: rootvg
lv: test4lv
size: 1G
policy: minimum
- name: Create a logical volume with special options like mirror pool
community.general.aix_lvol:
vg: testvg
lv: testlv
size: 512M
opts: -p copy1=poolA -p copy2=poolB
- name: Extend the logical volume to 1200M
community.general.aix_lvol:
vg: testvg
lv: test4lv
size: 1200M
- name: Remove the logical volume
community.general.aix_lvol:
vg: testvg
lv: testlv
state: absent
'''
RETURN = r'''
msg:
type: str
description: A friendly message describing the task result.
returned: always
sample: Logical volume testlv created.
'''
import re
from ansible.module_utils.basic import AnsibleModule
def convert_size(module, size):
unit = size[-1].upper()
units = ['M', 'G', 'T']
try:
multiplier = 1024 ** units.index(unit)
except ValueError:
module.fail_json(msg="No valid size unit specified.")
return int(size[:-1]) * multiplier
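# Illustrative conversions (values in MB, the unit used throughout the module):
#   convert_size(module, "512M") -> 512
#   convert_size(module, "1G")   -> 1024
#   convert_size(module, "2T")   -> 2097152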
def round_ppsize(x, base=16):
new_size = int(base * round(float(x) / base))
if new_size < x:
new_size += base
return new_size
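# Rounds up to the next multiple of the PP size, for example with base=16:
#   round_ppsize(500, 16) -> 512    round_ppsize(512, 16) -> 512    round_ppsize(513, 16) -> 528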
def parse_lv(data):
name = None
for line in data.splitlines():
match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
if match is not None:
name = match.group(1)
vg = match.group(2)
continue
match = re.search(r"LPs:\s+(\d+).*PPs", line)
if match is not None:
lps = int(match.group(1))
continue
match = re.search(r"PP SIZE:\s+(\d+)", line)
if match is not None:
pp_size = int(match.group(1))
continue
match = re.search(r"INTER-POLICY:\s+(\w+)", line)
if match is not None:
policy = match.group(1)
continue
if not name:
return None
size = lps * pp_size
return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
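# Illustrative 'lslv <lv>' lines the regexps above pick apart (hypothetical
# values; the real output contains many more fields):
#   LOGICAL VOLUME:     testlv                 VOLUME GROUP:   testvg
#   LPs:                32                     PPs:            32
#   PP SIZE:            16 megabyte(s)
#   INTER-POLICY:       maximum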
def parse_vg(data):
for line in data.splitlines():
match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
if match is not None:
name = match.group(1)
continue
match = re.search(r"TOTAL PP.*\((\d+)", line)
if match is not None:
size = int(match.group(1))
continue
match = re.search(r"PP SIZE:\s+(\d+)", line)
if match is not None:
pp_size = int(match.group(1))
continue
match = re.search(r"FREE PP.*\((\d+)", line)
if match is not None:
free = int(match.group(1))
continue
return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
def main():
module = AnsibleModule(
argument_spec=dict(
vg=dict(type='str', required=True),
lv=dict(type='str', required=True),
lv_type=dict(type='str', default='jfs2'),
size=dict(type='str'),
opts=dict(type='str', default=''),
copies=dict(type='int', default=1),
state=dict(type='str', default='present', choices=['absent', 'present']),
policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
pvs=dict(type='list', elements='str', default=list())
),
supports_check_mode=True,
)
vg = module.params['vg']
lv = module.params['lv']
lv_type = module.params['lv_type']
size = module.params['size']
opts = module.params['opts']
copies = module.params['copies']
policy = module.params['policy']
state = module.params['state']
pvs = module.params['pvs']
if policy == 'maximum':
lv_policy = 'x'
else:
lv_policy = 'm'
# Add echo command when running in check-mode
if module.check_mode:
test_opt = [module.get_bin_path("echo", required=True)]
else:
test_opt = []
# check if system commands are available
lsvg_cmd = module.get_bin_path("lsvg", required=True)
lslv_cmd = module.get_bin_path("lslv", required=True)
# Get information on volume group requested
rc, vg_info, err = module.run_command([lsvg_cmd, vg])
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
this_vg = parse_vg(vg_info)
if size is not None:
# Calculate pp size and round it up based on pp size.
lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
# Get information on logical volume requested
rc, lv_info, err = module.run_command([lslv_cmd, lv])
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
changed = False
this_lv = parse_lv(lv_info)
if state == 'present' and not size:
if this_lv is None:
module.fail_json(msg="No size given.")
if this_lv is None:
if state == 'present':
if lv_size > this_vg['free']:
module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
# create LV
mklv_cmd = module.get_bin_path("mklv", required=True)
# Pass copies as a string and split the free-form opts so run_command gets clean argv entries.
cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", str(copies), "-e", lv_policy] + (opts.split() if opts else []) + [vg, "%sM" % (lv_size, )] + pvs
rc, out, err = module.run_command(cmd)
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s created." % lv)
else:
module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
else:
if state == 'absent':
# remove LV
rmlv_cmd = module.get_bin_path("rmlv", required=True)
rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']])
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
else:
module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
else:
if this_lv['policy'] != policy:
# change lv allocation policy
chlv_cmd = module.get_bin_path("chlv", required=True)
rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']])
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
else:
module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
if vg != this_lv['vg']:
module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))
# From here on, the only remaining action is resizing; if no size parameter was passed we do nothing.
if not size:
module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))
# resize LV based on absolute values
if int(lv_size) > this_lv['size']:
extendlv_cmd = module.get_bin_path("extendlv", required=True)
cmd = test_opt + [extendlv_cmd, lv, "%sM" % (lv_size - this_lv['size'], )]
rc, out, err = module.run_command(cmd)
if rc == 0:
module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
else:
module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
elif lv_size < this_lv['size']:
module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
else:
module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
if __name__ == '__main__':
main()

View File

@ -0,0 +1,207 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Christian Wollinger <@cwollinger>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: alerta_customer
short_description: Manage customers in Alerta
version_added: 4.8.0
description:
- Create or delete customers in Alerta with the REST API.
author: Christian Wollinger (@cwollinger)
seealso:
- name: API documentation
description: Documentation for Alerta API
link: https://docs.alerta.io/api/reference.html#customers
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
customer:
description:
- Name of the customer.
required: true
type: str
match:
description:
- The matching logged in user for the customer.
required: true
type: str
alerta_url:
description:
- The Alerta API endpoint.
required: true
type: str
api_username:
description:
- The username for the API using basic auth.
type: str
api_password:
description:
- The password for the API using basic auth.
type: str
api_key:
description:
- The access token for the API.
type: str
state:
description:
- Whether the customer should exist or not.
- Both O(customer) and O(match) identify a customer that should be added or removed.
type: str
choices: [ absent, present ]
default: present
'''
EXAMPLES = """
- name: Create customer
community.general.alerta_customer:
alerta_url: https://alerta.example.com
api_username: admin@example.com
api_password: password
customer: Developer
match: dev@example.com
- name: Delete customer
community.general.alerta_customer:
alerta_url: https://alerta.example.com
api_username: admin@example.com
api_password: password
customer: Developer
match: dev@example.com
state: absent
"""
RETURN = """
msg:
description:
- Success or failure message.
returned: always
type: str
sample: Customer customer1 created
response:
description:
- The response from the API.
returned: always
type: dict
"""
from ansible.module_utils.urls import fetch_url, basic_auth_header
from ansible.module_utils.basic import AnsibleModule
class AlertaInterface(object):
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.customer = module.params['customer']
self.match = module.params['match']
self.alerta_url = module.params['alerta_url']
self.headers = {"Content-Type": "application/json"}
if module.params.get('api_key', None):
self.headers["Authorization"] = "Key %s" % module.params['api_key']
else:
self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password'])
def send_request(self, url, data=None, method="GET"):
response, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
status_code = info["status"]
if status_code == 401:
self.module.fail_json(failed=True, response=info, msg="Unauthorized to request '%s' on '%s'" % (method, url))
elif status_code == 403:
self.module.fail_json(failed=True, response=info, msg="Permission Denied for '%s' on '%s'" % (method, url))
elif status_code == 404:
self.module.fail_json(failed=True, response=info, msg="Not found for request '%s' on '%s'" % (method, url))
elif status_code in (200, 201):
return self.module.from_json(response.read())
self.module.fail_json(failed=True, response=info, msg="Alerta API error with HTTP %d for %s" % (status_code, url))
def get_customers(self):
url = "%s/api/customers" % self.alerta_url
response = self.send_request(url)
pages = response["pages"]
if pages > 1:
for page in range(2, pages + 1):
page_url = url + '?page=' + str(page)
new_results = self.send_request(page_url)
# Merge each extra page's customers; dict.update() would overwrite the first page's list.
response["customers"].extend(new_results["customers"])
return response
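# Hypothetical shape of the paginated payload walked above (field names taken
# from the Alerta customers API):
#   {"customers": [{"customer": "Developer", "match": "dev@example.com", "id": "..."}],
#    "page": 1, "pages": 2, "total": 30, ...}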
def create_customer(self):
url = "%s/api/customer" % self.alerta_url
payload = {
'customer': self.customer,
'match': self.match,
}
payload = self.module.jsonify(payload)
response = self.send_request(url, payload, 'POST')
return response
def delete_customer(self, id):
url = "%s/api/customer/%s" % (self.alerta_url, id)
response = self.send_request(url, None, 'DELETE')
return response
def find_customer_id(self, customer):
for i in customer['customers']:
if self.customer == i['customer'] and self.match == i['match']:
return i['id']
return None
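# Minimal usage sketch (hypothetical data, mirroring how main() below uses it):
#   response = alerta_iface.get_customers()
#   customer_id = alerta_iface.find_customer_id(response)  # None when no match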
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
customer=dict(type='str', required=True),
match=dict(type='str', required=True),
alerta_url=dict(type='str', required=True),
api_username=dict(type='str'),
api_password=dict(type='str', no_log=True),
api_key=dict(type='str', no_log=True),
),
required_together=[['api_username', 'api_password']],
mutually_exclusive=[['api_username', 'api_key']],
supports_check_mode=True
)
alerta_iface = AlertaInterface(module)
if alerta_iface.state == 'present':
response = alerta_iface.get_customers()
if alerta_iface.find_customer_id(response):
module.exit_json(changed=False, response=response, msg="Customer %s already exists" % alerta_iface.customer)
else:
if not module.check_mode:
response = alerta_iface.create_customer()
module.exit_json(changed=True, response=response, msg="Customer %s created" % alerta_iface.customer)
else:
response = alerta_iface.get_customers()
id = alerta_iface.find_customer_id(response)
if id:
if not module.check_mode:
alerta_iface.delete_customer(id)
module.exit_json(changed=True, response=response, msg="Customer %s with id %s deleted" % (alerta_iface.customer, id))
else:
module.exit_json(changed=False, response=response, msg="Customer %s does not exists" % alerta_iface.customer)
if __name__ == "__main__":
main()

File diff suppressed because it is too large

View File

@ -0,0 +1,406 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: ali_instance_info
short_description: Gather information on instances of Alibaba Cloud ECS
description:
- This module fetches data from the Open API in Alicloud.
The module must be called from within the ECS instance itself.
attributes:
check_mode:
version_added: 3.3.0
# This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
name_prefix:
description:
- Use an instance name prefix to filter ECS instances.
type: str
version_added: '0.2.0'
tags:
description:
- A hash/dictionary of instance tags. C({"key":"value"})
aliases: ["instance_tags"]
type: dict
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
Filter keys can be the same as the request parameter name, or be lower case and use underscore (V("_")) or dash (V("-")) to
connect different words in one parameter. C(InstanceIds) should be a list.
C(Tag.n.Key) and C(Tag.n.Value) should be a dict; use O(tags) instead.
type: dict
version_added: '0.2.0'
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "Python >= 3.6"
- "footmark >= 1.13.0"
extends_documentation_fragment:
- community.general.alicloud
- community.general.attributes
- community.general.attributes.info_module
'''
EXAMPLES = '''
# Fetch instances details according to setting different filters
- name: Find all instances in the specified region
community.general.ali_instance_info:
register: all_instances
- name: Find all instances based on the specified ids
community.general.ali_instance_info:
instance_ids:
- "i-35b333d9"
- "i-ddav43kd"
register: instances_by_ids
- name: Find all instances based on the specified name_prefix
community.general.ali_instance_info:
name_prefix: "ecs_instance_"
register: instances_by_name_prefix
- name: Find instances based on tags
community.general.ali_instance_info:
tags:
Test: "add"
'''
RETURN = '''
instances:
description: List of ECS instances
returned: always
type: complex
contains:
availability_zone:
description: The availability zone the instance is in.
returned: always
type: str
sample: cn-beijing-a
block_device_mappings:
description: Any block device mapping entries for the instance.
returned: always
type: complex
contains:
device_name:
description: The device name exposed to the instance (for example, /dev/xvda).
returned: always
type: str
sample: /dev/xvda
attach_time:
description: The time stamp when the attachment was initiated.
returned: always
type: str
sample: "2018-06-25T04:08:26Z"
delete_on_termination:
description: Indicates whether the volume is deleted on instance termination.
returned: always
type: bool
sample: true
status:
description: The attachment state.
returned: always
type: str
sample: in_use
volume_id:
description: The ID of the cloud disk.
returned: always
type: str
sample: d-2zei53pjsi117y6gf9t6
cpu:
description: The CPU core count of the instance.
returned: always
type: int
sample: 4
creation_time:
description: The time the instance was created.
returned: always
type: str
sample: "2018-06-25T04:08Z"
description:
description: The instance description.
returned: always
type: str
sample: "my ansible instance"
eip:
description: The attributes of the EIP associated with the instance.
returned: always
type: complex
contains:
allocation_id:
description: The ID of the EIP.
returned: always
type: str
sample: eip-12345
internet_charge_type:
description: The internet charge type of the EIP.
returned: always
type: str
sample: "paybybandwidth"
ip_address:
description: EIP address.
returned: always
type: str
sample: 42.10.2.2
expired_time:
description: The time the instance will expire.
returned: always
type: str
sample: "2099-12-31T15:59Z"
gpu:
description: The attributes of the instance's GPU.
returned: always
type: complex
contains:
amount:
description: The count of the GPU.
returned: always
type: int
sample: 0
spec:
description: The specification of the GPU.
returned: always
type: str
sample: ""
host_name:
description: The host name of the instance.
returned: always
type: str
sample: iZ2zewaoZ
id:
description: Alias of instance_id.
returned: always
type: str
sample: i-abc12345
instance_id:
description: ECS instance resource ID.
returned: always
type: str
sample: i-abc12345
image_id:
description: The ID of the image used to launch the instance.
returned: always
type: str
sample: m-0011223344
inner_ip_address:
description: The inner IPv4 address of the classic instance.
returned: always
type: str
sample: 10.0.0.2
instance_charge_type:
description: The instance charge type.
returned: always
type: str
sample: PostPaid
instance_name:
description: The name of the instance.
returned: always
type: str
sample: my-ecs
instance_type_family:
description: The instance type family the instance belongs to.
returned: always
type: str
sample: ecs.sn1ne
instance_type:
description: The instance type of the running instance.
returned: always
type: str
sample: ecs.sn1ne.xlarge
internet_charge_type:
description: The billing method of the network bandwidth.
returned: always
type: str
sample: PayByBandwidth
internet_max_bandwidth_in:
description: Maximum incoming bandwidth from the internet network.
returned: always
type: int
sample: 200
internet_max_bandwidth_out:
description: Maximum outgoing bandwidth to the internet network.
returned: always
type: int
sample: 20
io_optimized:
description: Indicates whether the instance is optimized for EBS I/O.
returned: always
type: bool
sample: false
memory:
description: Memory size of the instance.
returned: always
type: int
sample: 8192
network_interfaces:
description: One or more network interfaces for the instance.
returned: always
type: complex
contains:
mac_address:
description: The MAC address.
returned: always
type: str
sample: "00:11:22:33:44:55"
network_interface_id:
description: The ID of the network interface.
returned: always
type: str
sample: eni-01234567
primary_ip_address:
description: The primary IPv4 address of the network interface within the vswitch.
returned: always
type: str
sample: 10.0.0.1
osname:
description: The operating system name of the instance.
returned: always
type: str
sample: CentOS
ostype:
description: The operating system type of the instance.
returned: always
type: str
sample: linux
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
type: str
sample: 10.0.0.1
public_ip_address:
description: The public IPv4 address assigned to the instance, or its EIP address.
returned: always
type: str
sample: 43.0.0.1
resource_group_id:
description: The id of the resource group to which the instance belongs.
returned: always
type: str
sample: my-ecs-group
security_groups:
description: One or more security groups for the instance.
returned: always
type: list
elements: dict
contains:
group_id:
description: The ID of the security group.
returned: always
type: str
sample: sg-0123456
group_name:
description: The name of the security group.
returned: always
type: str
sample: my-security-group
status:
description: The current status of the instance.
returned: always
type: str
sample: running
tags:
description: Any tags assigned to the instance.
returned: always
type: dict
sample:
vswitch_id:
description: The ID of the vswitch in which the instance is running.
returned: always
type: str
sample: vsw-dew00abcdef
vpc_id:
description: The ID of the VPC the instance is in.
returned: always
type: str
sample: vpc-0011223344
ids:
description: List of ECS instance IDs
returned: always
type: list
sample: [i-12345er, i-3245fs]
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
)
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
name_prefix=dict(type='str'),
tags=dict(type='dict', aliases=['instance_tags']),
filters=dict(type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if HAS_FOOTMARK is False:
module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
ecs = ecs_connect(module)
instances = []
instance_ids = []
ids = []
name_prefix = module.params['name_prefix']
filters = module.params['filters']
if not filters:
filters = {}
for key, value in list(filters.items()):
if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
for id in value:
if id not in ids:
ids.append(id)
if ids:
filters['instance_ids'] = ids
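# For example (hypothetical filter), {"InstanceIds": ["i-12345er", "i-3245fs"]}
# is normalized into filters["instance_ids"] so footmark receives one flat list.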
if module.params['tags']:
filters['tags'] = module.params['tags']
for inst in ecs.describe_instances(**filters):
if name_prefix:
if not str(inst.instance_name).startswith(name_prefix):
continue
volumes = ecs.describe_disks(instance_id=inst.id)
setattr(inst, 'block_device_mappings', volumes)
setattr(inst, 'user_data', inst.describe_user_data())
instances.append(inst.read())
instance_ids.append(inst.id)
module.exit_json(changed=False, ids=instance_ids, instances=instances)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,407 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
# Copyright (c) 2015, David Wittman <dwittman@gmail.com>
# Copyright (c) 2022, Marius Rieder <marius.rieder@scs.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool.
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
author:
- Marius Rieder (@jiuka)
- David Wittman (@DavidWittman)
- Gabe Mulley (@mulby)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
options:
name:
description:
- The generic name of the link.
type: str
required: true
path:
description:
- The path to the real executable that the link should point to.
type: path
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is always required on RHEL-based distributions. On Debian-based distributions this option is
required when the alternative O(name) is unknown to the system.
type: path
priority:
description:
- The priority of the alternative. If no priority is given for creation V(50) is used as a fallback.
type: int
state:
description:
- V(present) - install the alternative (if not already installed), but do
not set it as the currently selected alternative for the group.
- V(selected) - install the alternative (if not already installed), and
set it as the currently selected alternative for the group.
- V(auto) - install the alternative (if not already installed), and
set the group to auto mode. Added in community.general 5.1.0.
- V(absent) - removes the alternative. Added in community.general 5.1.0.
choices: [ present, selected, auto, absent ]
default: selected
type: str
version_added: 4.8.0
subcommands:
description:
- A list of subcommands.
- Each subcommand needs a name, a link and a path parameter.
- Subcommands are also named 'slaves' or 'followers', depending on the version
of alternatives.
type: list
elements: dict
aliases: ['slaves']
suboptions:
name:
description:
- The generic name of the subcommand.
type: str
required: true
path:
description:
- The path to the real executable that the subcommand should point to.
type: path
required: true
link:
description:
- The path to the symbolic link that should point to the real subcommand executable.
type: path
required: true
version_added: 5.1.0
requirements: [ update-alternatives ]
'''
EXAMPLES = r'''
- name: Correct java version selected
community.general.alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: Alternatives link created
community.general.alternatives:
name: hadoop-conf
link: /etc/hadoop/conf
path: /etc/hadoop/conf.ansible
- name: Make java 32 bit an alternative with low priority
community.general.alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
priority: -10
- name: Install Python 3.5 but do not select it
community.general.alternatives:
name: python
path: /usr/bin/python3.5
link: /usr/bin/python
state: present
- name: Install Python 3.5 and reset selection to auto
community.general.alternatives:
name: python
path: /usr/bin/python3.5
link: /usr/bin/python
state: auto
- name: keytool is a subcommand of java
community.general.alternatives:
name: java
link: /usr/bin/java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
subcommands:
- name: keytool
link: /usr/bin/keytool
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
class AlternativeState:
PRESENT = "present"
SELECTED = "selected"
ABSENT = "absent"
AUTO = "auto"
@classmethod
def to_list(cls):
return [cls.PRESENT, cls.SELECTED, cls.ABSENT, cls.AUTO]
class AlternativesModule(object):
_UPDATE_ALTERNATIVES = None
def __init__(self, module):
self.module = module
self.result = dict(changed=False, diff=dict(before=dict(), after=dict()))
self.module.run_command_environ_update = {'LC_ALL': 'C'}
self.messages = []
self.run()
@property
def mode_present(self):
return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO]
@property
def mode_selected(self):
return self.module.params.get('state') == AlternativeState.SELECTED
@property
def mode_auto(self):
return self.module.params.get('state') == AlternativeState.AUTO
def run(self):
self.parse()
if self.mode_present:
# Check if we need to (re)install
subcommands_parameter = self.module.params['subcommands']
priority_parameter = self.module.params['priority']
if (
self.path not in self.current_alternatives or
(priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
(subcommands_parameter is not None and (
not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
))
):
self.install()
# Check if we need to set the preference
if self.mode_selected and self.current_path != self.path:
self.set()
# Check if we need to reset to auto
if self.mode_auto and self.current_mode == 'manual':
self.auto()
else:
# Check if we need to uninstall
if self.path in self.current_alternatives:
self.remove()
self.result['msg'] = ' '.join(self.messages)
self.module.exit_json(**self.result)
def install(self):
if not os.path.exists(self.path):
self.module.fail_json(msg="Specified path %s does not exist" % self.path)
if not self.link:
self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link')
cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)]
if self.module.params['subcommands'] is not None:
subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands]
cmd += [item for sublist in subcommands for item in sublist]
self.result['changed'] = True
self.messages.append("Install alternative '%s' for '%s'." % (self.path, self.name))
if not self.module.check_mode:
self.module.run_command(cmd, check_rc=True)
if self.module._diff:
self.result['diff']['after'] = dict(
state=AlternativeState.PRESENT,
path=self.path,
priority=self.priority,
link=self.link,
)
if self.subcommands:
self.result['diff']['after'].update(dict(
subcommands=self.subcommands
))
def remove(self):
cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path]
self.result['changed'] = True
self.messages.append("Remove alternative '%s' from '%s'." % (self.path, self.name))
if not self.module.check_mode:
self.module.run_command(cmd, check_rc=True)
if self.module._diff:
self.result['diff']['after'] = dict(state=AlternativeState.ABSENT)
def set(self):
cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path]
self.result['changed'] = True
self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name))
if not self.module.check_mode:
self.module.run_command(cmd, check_rc=True)
if self.module._diff:
self.result['diff']['after']['state'] = AlternativeState.SELECTED
def auto(self):
cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name]
self.messages.append("Set alternative to auto for '%s'." % (self.name))
self.result['changed'] = True
if not self.module.check_mode:
self.module.run_command(cmd, check_rc=True)
if self.module._diff:
self.result['diff']['after']['state'] = AlternativeState.PRESENT
@property
def name(self):
return self.module.params.get('name')
@property
def path(self):
return self.module.params.get('path')
@property
def link(self):
return self.module.params.get('link') or self.current_link
@property
def priority(self):
if self.module.params.get('priority') is not None:
return self.module.params.get('priority')
return self.current_alternatives.get(self.path, {}).get('priority', 50)
@property
def subcommands(self):
if self.module.params.get('subcommands') is not None:
return self.module.params.get('subcommands')
elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'):
return self.current_alternatives[self.path].get('subcommands')
return None
@property
def UPDATE_ALTERNATIVES(self):
if self._UPDATE_ALTERNATIVES is None:
self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True)
return self._UPDATE_ALTERNATIVES
def parse(self):
self.current_mode = None
self.current_path = None
self.current_link = None
self.current_alternatives = {}
# Run `update-alternatives --display <name>` to find existing alternatives
(rc, display_output, dummy) = self.module.run_command(
[self.UPDATE_ALTERNATIVES, '--display', self.name]
)
if rc != 0:
self.module.debug("No current alternative found. '%s' exited with %s" % (self.UPDATE_ALTERNATIVES, rc))
return
current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE)
current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE)
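# Illustrative 'update-alternatives --display java' output the regexps above
# parse (Debian-style wording; exact phrasing differs between distributions):
#   java - auto mode
#     link currently points to /usr/lib/jvm/java-11/bin/java
#     link java is /usr/bin/java
#     follower keytool is /usr/bin/keytool
#   /usr/lib/jvm/java-11/bin/java - priority 1111
#     follower keytool: /usr/lib/jvm/java-11/bin/keytool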
match = current_mode_regex.search(display_output)
if not match:
self.module.debug("No current mode found in output")
return
self.current_mode = match.group(1)
match = current_path_regex.search(display_output)
if not match:
self.module.debug("No current path found in output")
else:
self.current_path = match.group(1)
match = current_link_regex.search(display_output)
if not match:
self.module.debug("No current link found in output")
else:
self.current_link = match.group(1)
subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output))
if not subcmd_path_map and self.subcommands:
subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands)
for path, prio, subcmd in alternative_regex.findall(display_output):
self.current_alternatives[path] = dict(
priority=int(prio),
subcommands=[dict(
name=name,
path=spath,
link=subcmd_path_map.get(name)
) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)']
)
if self.module._diff:
if self.path in self.current_alternatives:
self.result['diff']['before'].update(dict(
state=AlternativeState.PRESENT,
path=self.path,
priority=self.current_alternatives[self.path].get('priority'),
link=self.current_link,
))
if self.current_alternatives[self.path].get('subcommands'):
self.result['diff']['before'].update(dict(
subcommands=self.current_alternatives[self.path].get('subcommands')
))
if self.current_mode == 'manual' and self.current_path != self.path:
self.result['diff']['before'].update(dict(
state=AlternativeState.SELECTED
))
else:
self.result['diff']['before'].update(dict(
state=AlternativeState.ABSENT
))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
path=dict(type='path', required=True),
link=dict(type='path'),
priority=dict(type='int'),
state=dict(
type='str',
choices=AlternativeState.to_list(),
default=AlternativeState.SELECTED,
),
subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict(
name=dict(type='str', required=True),
path=dict(type='path', required=True),
link=dict(type='path', required=True),
)),
),
supports_check_mode=True,
)
AlternativesModule(module)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,328 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: ansible_galaxy_install
author:
- "Alexei Znamensky (@russoz)"
short_description: Install Ansible roles or collections using ansible-galaxy
version_added: 3.5.0
description:
- This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
notes:
- Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0.
- >
The module will try and run using the C(C.UTF-8) locale.
If that fails, it will try C(en_US.UTF-8).
If that one also fails, the module will fail.
requirements:
- ansible-core 2.11 or newer
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
state:
description:
- >
If O(state=present) then the collection or role will be installed.
Note that the collections and roles are not updated with this option.
- >
Currently the O(state=latest) is ignored unless O(type=collection), and it will
ensure the collection is installed and updated to the latest available version.
- Please note that O(force=true) can be used to perform upgrade regardless of O(type).
type: str
choices: [ present, latest ]
default: present
version_added: 9.1.0
type:
description:
- The type of installation performed by C(ansible-galaxy).
- If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections.
- "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices."
type: str
choices: [collection, role, both]
required: true
name:
description:
- Name of the collection or role being installed.
- >
Versions can be specified with C(ansible-galaxy) usual formats.
For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0).
- O(name) and O(requirements_file) are mutually exclusive.
type: str
requirements_file:
description:
- Path to a file containing a list of requirements to be installed.
- It works for O(type) equals to V(collection) and V(role).
- O(name) and O(requirements_file) are mutually exclusive.
type: path
dest:
description:
- The path to the directory containing your collections or roles, according to the value of O(type).
- >
Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file)
contains both roles and collections and O(dest) is specified.
type: path
no_deps:
description:
- Refrain from installing dependencies.
version_added: 4.5.0
type: bool
default: false
force:
description:
- Force overwriting existing roles and/or collections.
- It can be used for upgrading, but the module output will always report C(changed=true).
- Using O(force=true) is mandatory when downgrading.
type: bool
default: false
"""
EXAMPLES = """
- name: Install collection community.network
community.general.ansible_galaxy_install:
type: collection
name: community.network
- name: Install role at specific path
community.general.ansible_galaxy_install:
type: role
name: ansistrano.deploy
dest: /ansible/roles
- name: Install collections and roles together
community.general.ansible_galaxy_install:
type: both
requirements_file: requirements.yml
- name: Force-install collection community.network at specific version
community.general.ansible_galaxy_install:
type: collection
name: community.network:3.0.2
force: true
"""
RETURN = """
type:
description: The value of the O(type) parameter.
type: str
returned: always
name:
description: The value of the O(name) parameter.
type: str
returned: always
dest:
description: The value of the O(dest) parameter.
type: str
returned: always
requirements_file:
description: The value of the O(requirements_file) parameter.
type: str
returned: always
force:
description: The value of the O(force) parameter.
type: bool
returned: always
installed_roles:
description:
- If O(requirements_file) is specified instead, returns a dictionary with all the roles installed per path.
- If O(name) is specified, returns that role name and the version installed per path.
type: dict
returned: always when installing roles
contains:
"<path>":
description: Roles and versions for that path.
type: dict
sample:
/home/user42/.ansible/roles:
ansistrano.deploy: 3.9.0
baztian.xfce: v0.0.3
/custom/ansible/roles:
ansistrano.deploy: 3.8.0
installed_collections:
description:
- If O(requirements_file) is specified instead, returns a dictionary with all the collections installed per path.
- If O(name) is specified, returns that collection name and the version installed per path.
type: dict
returned: always when installing collections
contains:
"<path>":
description: Collections and versions for that path
type: dict
sample:
/home/az/.ansible/collections/ansible_collections:
community.docker: 1.6.0
community.general: 3.0.2
/custom/ansible/ansible_collections:
community.general: 3.1.0
new_collections:
description: New collections installed by this module.
returned: success
type: dict
sample:
community.general: 3.1.0
community.docker: 1.6.1
new_roles:
description: New roles installed by this module.
returned: success
type: dict
sample:
ansistrano.deploy: 3.8.0
baztian.xfce: v0.0.3
"""
import re
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException
class AnsibleGalaxyInstall(ModuleHelper):
_RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
_RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
_RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
_RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
_RE_INSTALL_OUTPUT = re.compile(
r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$'
)
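# Illustrative lines these patterns are written to match (wording assumed from
# typical ansible-galaxy output; formats vary across ansible-core versions):
#   _RE_GALAXY_VERSION: "ansible-galaxy [core 2.14.1]"
#   _RE_LIST_PATH:      "# /home/user42/.ansible/collections/ansible_collections"
#   _RE_LIST_COLL:      "community.general 6.2.0"
#   _RE_LIST_ROLE:      "- ansistrano.deploy, 3.8.0"
#   _RE_INSTALL_OUTPUT: "community.general:6.2.0 was installed successfully"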
ansible_version = None
output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
module = dict(
argument_spec=dict(
state=dict(type='str', choices=['present', 'latest'], default='present'),
type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
name=dict(type='str'),
requirements_file=dict(type='path'),
dest=dict(type='path'),
force=dict(type='bool', default=False),
no_deps=dict(type='bool', default=False),
),
mutually_exclusive=[('name', 'requirements_file')],
required_one_of=[('name', 'requirements_file')],
required_if=[('type', 'both', ['requirements_file'])],
supports_check_mode=False,
)
use_old_vardict = False
command = 'ansible-galaxy'
command_args_formats = dict(
type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]),
galaxy_cmd=cmd_runner_fmt.as_list(),
upgrade=cmd_runner_fmt.as_bool("--upgrade"),
requirements_file=cmd_runner_fmt.as_opt_val('-r'),
dest=cmd_runner_fmt.as_opt_val('-p'),
force=cmd_runner_fmt.as_bool("--force"),
no_deps=cmd_runner_fmt.as_bool("--no-deps"),
version=cmd_runner_fmt.as_fixed("--version"),
name=cmd_runner_fmt.as_list(),
)
def _make_runner(self, lang):
return CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True)
def _get_ansible_galaxy_version(self):
class UnsupportedLocale(ModuleHelperException):
pass
def process(rc, out, err):
if (rc != 0 and "unsupported locale setting" in err) or (rc == 0 and "cannot change locale" in err):
raise UnsupportedLocale(msg=err)
line = out.splitlines()[0]
match = self._RE_GALAXY_VERSION.match(line)
if not match:
self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line))
version = match.group("version")
version = tuple(int(x) for x in version.split('.')[:3])
return version
try:
runner = self._make_runner("C.UTF-8")
with runner("version", check_rc=False, output_process=process) as ctx:
return runner, ctx.run()
except UnsupportedLocale:
runner = self._make_runner("en_US.UTF-8")
with runner("version", check_rc=True, output_process=process) as ctx:
return runner, ctx.run()
def __init_module__(self):
self.runner, self.ansible_version = self._get_ansible_galaxy_version()
if self.ansible_version < (2, 11):
self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.")
self.vars.set("new_collections", {}, change=True)
self.vars.set("new_roles", {}, change=True)
if self.vars.type != "collection":
self.vars.installed_roles = self._list_roles()
if self.vars.type != "roles":
self.vars.installed_collections = self._list_collections()
def _list_element(self, _type, path_re, elem_re):
def process(rc, out, err):
return [] if "None of the provided paths were usable" in out else out.splitlines()
with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx:
elems = ctx.run(type=_type, galaxy_cmd='list')
elems_dict = {}
current_path = None
for line in elems:
if line.startswith("#"):
match = path_re.match(line)
if not match:
continue
if self.vars.dest is not None and match.group('path') != self.vars.dest:
current_path = None
continue
current_path = match.group('path') if match else None
elems_dict[current_path] = {}
elif current_path is not None:
match = elem_re.match(line)
if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
continue
elems_dict[current_path][match.group('elem')] = match.group('version')
return elems_dict
def _list_collections(self):
return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)
def _list_roles(self):
return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)
def __run__(self):
def process(rc, out, err):
for line in out.splitlines():
match = self._RE_INSTALL_OUTPUT.match(line)
if not match:
continue
if match.group("collection"):
self.vars.new_collections[match.group("collection")] = match.group("cversion")
elif match.group("role"):
self.vars.new_roles[match.group("role")] = match.group("rversion")
upgrade = (self.vars.type == "collection" and self.vars.state == "latest")
with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx:
ctx.run(galaxy_cmd="install", upgrade=upgrade)
if self.verbosity > 2:
self.vars.set("run_info", ctx.run_info)
def main():
AnsibleGalaxyInstall.execute()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,452 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apache2_mod_proxy
author: Olivier Boukili (@oboukili)
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
- Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
status page has to be enabled and accessible, as this module relies on parsing
that page. This module supports Ansible check mode and requires the
BeautifulSoup Python module.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
balancer_url_suffix:
type: str
description:
- Suffix of the balancer pool url required to access the balancer pool
status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
default: /balancer-manager/
balancer_vhost:
type: str
description:
- (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
required: true
member_host:
type: str
description:
- (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
Port number is autodetected and should not be specified here.
If undefined, the apache2_mod_proxy module returns a list of dictionaries
with the attributes of all current balancer pool members.
state:
type: str
description:
- Desired state of the member host.
(absent|disabled),drained,hot_standby,ignore_errors can be
simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
- 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
tls:
description:
- Use https to access balancer management page.
type: bool
default: false
validate_certs:
description:
- Validate ssl/tls certificates.
type: bool
default: true
'''
EXAMPLES = '''
- name: Get all current balancer pool members attributes
community.general.apache2_mod_proxy:
balancer_vhost: 10.0.0.2
- name: Get a specific member attributes
community.general.apache2_mod_proxy:
balancer_vhost: myws.mydomain.org
balancer_url_suffix: /lb/
member_host: node1.myws.mydomain.org
# Enable all balancer pool members:
- name: Get attributes
community.general.apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
register: result
- name: Enable all balancer pool members
community.general.apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
member_host: '{{ item.host }}'
state: present
with_items: '{{ result.members }}'
# Gracefully disable a member from a loadbalancer node:
- name: Step 1
community.general.apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: drained
delegate_to: myloadbalancernode
- name: Step 2
ansible.builtin.wait_for:
host: '{{ member.host }}'
port: '{{ member.port }}'
state: drained
delegate_to: myloadbalancernode
- name: Step 3
community.general.apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: absent
delegate_to: myloadbalancernode
'''
RETURN = '''
member:
description: Specific balancer member information dictionary, returned when the apache2_mod_proxy module is invoked with the member_host parameter.
type: dict
returned: success
sample:
{"attributes":
{"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
}
members:
description: List of member dictionaries (as defined above), returned when apache2_mod_proxy is invoked with no member_host and state arguments.
returned: success
type: list
sample:
[{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
},
{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.21",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false}
}
]
'''
import re
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import iteritems
BEAUTIFUL_SOUP_IMP_ERR = None
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
HAS_BEAUTIFULSOUP = False
else:
HAS_BEAUTIFULSOUP = True
# balancer member attributes extraction regexp:
EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
# Apache2 server version extraction regexp:
APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
def regexp_extraction(string, _regexp, groups=1):
""" Returns the capture group (default=1) specified in the regexp, applied to the string """
regexp_search = re.search(string=str(string), pattern=str(_regexp))
if regexp_search:
if regexp_search.group(groups) != '':
return str(regexp_search.group(groups))
return None
class BalancerMember(object):
""" Apache 2.4 mod_proxy LB balancer member.
attributes:
read-only:
host -> member host (string),
management_url -> member management url (string),
protocol -> member protocol (string)
port -> member port (string),
path -> member location (string),
balancer_url -> url of this member's parent balancer (string),
attributes -> whole member attributes (dictionary)
module -> ansible module instance (AnsibleModule object).
writable:
status -> status of the member (dictionary)
"""
def __init__(self, management_url, balancer_url, module):
self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
self.management_url = str(management_url)
self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
self.port = regexp_extraction(management_url, EXPRESSION, 5)
self.path = regexp_extraction(management_url, EXPRESSION, 6)
self.balancer_url = str(balancer_url)
self.module = module
def get_member_attributes(self):
""" Returns a dictionary of a balancer member's attributes."""
balancer_member_page = fetch_url(self.module, self.management_url)
if balancer_member_page[1]['status'] != 200:
self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
else:
try:
soup = BeautifulSoup(balancer_member_page[0])
except TypeError as exc:
self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc))
else:
subsoup = soup.findAll('table')[1].findAll('tr')
keys = subsoup[0].findAll('th')
for valuesset in subsoup[1::1]:
if re.search(pattern=self.host, string=str(valuesset)):
values = valuesset.findAll('td')
return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
def get_member_status(self):
""" Returns a dictionary of a balancer member's status attributes."""
status_mapping = {'disabled': 'Dis',
'drained': 'Drn',
'hot_standby': 'Stby',
'ignore_errors': 'Ign'}
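# A mode is considered active when its abbreviation (e.g. 'Dis') appears in the raw 'Status' string.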
actual_status = str(self.attributes['Status'])
status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping))
return status
def set_member_status(self, values):
""" Sets a balancer member's status attributes amongst pre-mapped values."""
values_mapping = {'disabled': '&w_status_D',
'drained': '&w_status_N',
'hot_standby': '&w_status_H',
'ignore_errors': '&w_status_I'}
request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping))
request_body = "{0}{1}".format(request_body, values_url)
response = fetch_url(self.module, self.management_url, data=request_body)
if response[1]['status'] != 200:
self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status'])
attributes = property(get_member_attributes)
status = property(get_member_status, set_member_status)
class Balancer(object):
""" Apache httpd 2.4 mod_proxy balancer object"""
def __init__(self, host, suffix, module, members=None, tls=False):
if tls:
self.base_url = 'https://' + str(host)
self.url = 'https://' + str(host) + str(suffix)
else:
self.base_url = 'http://' + str(host)
self.url = 'http://' + str(host) + str(suffix)
self.module = module
self.page = self.fetch_balancer_page()
if members is None:
self._members = []
def fetch_balancer_page(self):
""" Returns the balancer management html page as a string for later parsing."""
page = fetch_url(self.module, str(self.url))
if page[1]['status'] != 200:
self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
else:
content = page[0].read()
apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
if apache_version:
if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
return content
else:
self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
def get_balancer_members(self):
""" Returns members of the balancer as a generator object for later iteration."""
try:
soup = BeautifulSoup(self.page)
except TypeError:
self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
else:
for element in soup.findAll('a')[1::1]:
balancer_member_suffix = str(element.get('href'))
if not balancer_member_suffix:
self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
else:
yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
members = property(get_balancer_members)
def main():
""" Initiates module."""
module = AnsibleModule(
argument_spec=dict(
balancer_vhost=dict(required=True, type='str'),
balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
member_host=dict(type='str'),
state=dict(type='str'),
tls=dict(default=False, type='bool'),
validate_certs=dict(default=True, type='bool')
),
supports_check_mode=True
)
if HAS_BEAUTIFULSOUP is False:
module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
if module.params['state'] is not None:
states = module.params['state'].split(',')
if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
else:
for _state in states:
if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
module.fail_json(
msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
)
else:
states = ['None']
mybalancer = Balancer(module.params['balancer_vhost'],
module.params['balancer_url_suffix'],
module=module,
tls=module.params['tls'])
if module.params['member_host'] is None:
json_output_list = []
for member in mybalancer.members:
json_output_list.append({
"host": member.host,
"status": member.status,
"protocol": member.protocol,
"port": member.port,
"path": member.path,
"attributes": member.attributes,
"management_url": member.management_url,
"balancer_url": member.balancer_url
})
module.exit_json(
changed=False,
members=json_output_list
)
else:
changed = False
member_exists = False
member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
for mode in member_status.keys():
for state in states:
if mode == state:
member_status[mode] = True
elif mode == 'disabled' and state == 'absent':
member_status[mode] = True
for member in mybalancer.members:
if str(member.host) == str(module.params['member_host']):
member_exists = True
if module.params['state'] is not None:
member_status_before = member.status
if not module.check_mode:
member_status_after = member.status = member_status
else:
member_status_after = member_status
if member_status_before != member_status_after:
changed = True
json_output = {
"host": member.host,
"status": member.status,
"protocol": member.protocol,
"port": member.port,
"path": member.path,
"attributes": member.attributes,
"management_url": member.management_url,
"balancer_url": member.balancer_url
}
if member_exists:
module.exit_json(
changed=changed,
member=json_output
)
else:
module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
if __name__ == '__main__':
main()

View File

@ -0,0 +1,297 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apache2_module
author:
- Christian Berendt (@berendt)
- Ralf Hertel (@n0trax)
- Robin Roth (@robinro)
short_description: Enables/disables a module of the Apache2 webserver
description:
- Enables or disables a specified module of the Apache2 webserver.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
type: str
description:
- Name of the module to enable/disable as given to C(a2enmod/a2dismod).
required: true
identifier:
type: str
description:
- Identifier of the module as listed by C(apache2ctl -M).
This is optional and usually determined automatically by the common convention of
appending V(_module) to O(name) as well as custom exceptions for popular modules.
required: false
force:
description:
- Force disabling of default modules and override Debian warnings.
required: false
type: bool
default: false
state:
type: str
description:
- Desired state of the module.
choices: ['present', 'absent']
default: present
ignore_configcheck:
description:
- Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
type: bool
default: false
warn_mpm_absent:
description:
- Control the behavior of the warning process for MPM modules.
type: bool
default: true
version_added: 6.3.0
requirements: ["a2enmod","a2dismod"]
notes:
- This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.
Whether it works on others depends on whether the C(a2enmod) and C(a2dismod) tools are available.
'''
EXAMPLES = '''
- name: Enable the Apache2 module wsgi
community.general.apache2_module:
state: present
name: wsgi
- name: Disables the Apache2 module wsgi
community.general.apache2_module:
state: absent
name: wsgi
- name: Disable default modules for Debian
community.general.apache2_module:
state: absent
name: autoindex
force: true
- name: Disable mpm_worker and ignore warnings about missing mpm module
community.general.apache2_module:
state: absent
name: mpm_worker
ignore_configcheck: true
- name: Disable mpm_event, enable mpm_prefork and ignore warnings about missing mpm module
community.general.apache2_module:
name: "{{ item.module }}"
state: "{{ item.state }}"
warn_mpm_absent: false
ignore_configcheck: true
loop:
- module: mpm_event
state: absent
- module: mpm_prefork
state: present
- name: Enable dump_io module, which is identified as dumpio_module inside apache2
community.general.apache2_module:
state: present
name: dump_io
identifier: dumpio_module
'''
RETURN = '''
result:
description: message about action taken
returned: always
type: str
warnings:
description: list of warning messages
returned: when needed
type: list
rc:
description: return code of underlying command
returned: failed
type: int
stdout:
description: stdout of underlying command
returned: failed
type: str
stderr:
description: stderr of underlying command
returned: failed
type: str
'''
import re
# import module snippets
from ansible.module_utils.basic import AnsibleModule
_re_threaded = re.compile(r'threaded: *yes')
def _run_threaded(module):
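"""Return True if the Apache control binary reports a threaded MPM (its -V output contains 'threaded: yes')."""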
control_binary = _get_ctl_binary(module)
result, stdout, stderr = module.run_command([control_binary, "-V"])
return bool(_re_threaded.search(stdout))
def _get_ctl_binary(module):
for command in ['apache2ctl', 'apachectl']:
ctl_binary = module.get_bin_path(command)
if ctl_binary is not None:
return ctl_binary
module.fail_json(msg="Neither of apache2ctl nor apachectl found. At least one apache control binary is necessary.")
def _module_is_enabled(module):
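"""Return True if the module identifier appears in the output of 'apache2ctl -M'."""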
control_binary = _get_ctl_binary(module)
result, stdout, stderr = module.run_command([control_binary, "-M"])
if result != 0:
error_msg = "Error executing %s: %s" % (control_binary, stderr)
if module.params['ignore_configcheck']:
if 'AH00534' in stderr and 'mpm_' in module.params['name']:
if module.params['warn_mpm_absent']:
module.warnings.append(
"No MPM module loaded! apache2 reload AND other module actions"
" will fail if no MPM module is loaded immediately."
)
else:
module.warnings.append(error_msg)
return False
else:
module.fail_json(msg=error_msg)
searchstring = ' ' + module.params['identifier']
return searchstring in stdout
def create_apache_identifier(name):
"""
By convention if a module is loaded via name, it appears in apache2ctl -M as
name_module.
Some modules don't follow this convention and we use replacements for those."""
# a2enmod name replacement to apache2ctl -M names
text_workarounds = [
('shib', 'mod_shib'),
('shib2', 'mod_shib'),
('evasive', 'evasive20_module'),
]
# re expressions to extract subparts of names
re_workarounds = [
('php', re.compile(r'^(php\d)\.')),
]
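# e.g. the a2enmod name 'php7.4' is listed by apache2ctl -M as 'php7_module'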
for a2enmod_spelling, module_name in text_workarounds:
if a2enmod_spelling in name:
return module_name
for search, reexpr in re_workarounds:
if search in name:
try:
rematch = reexpr.search(name)
return rematch.group(1) + '_module'
except AttributeError:
pass
return name + '_module'
def _set_state(module, state):
name = module.params['name']
force = module.params['force']
want_enabled = state == 'present'
state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
success_msg = "Module %s %s" % (name, state_string)
if _module_is_enabled(module) != want_enabled:
if module.check_mode:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
a2mod_binary_path = module.get_bin_path(a2mod_binary)
if a2mod_binary_path is None:
module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
a2mod_binary_cmd = [a2mod_binary_path]
if not want_enabled and force:
# force exists only for a2dismod on debian
a2mod_binary_cmd.append('-f')
result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])
if _module_is_enabled(module) == want_enabled:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
else:
msg = (
'Failed to set module {name} to {state}:\n'
'{stdout}\n'
'Maybe the module identifier ({identifier}) was guessed incorrectly. '
'Consider setting the "identifier" option.'
).format(
name=name,
state=state_string,
stdout=stdout,
identifier=module.params['identifier']
)
module.fail_json(msg=msg,
rc=result,
stdout=stdout,
stderr=stderr)
else:
module.exit_json(changed=False,
result=success_msg,
warnings=module.warnings)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
identifier=dict(type='str'),
force=dict(type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
ignore_configcheck=dict(type='bool', default=False),
warn_mpm_absent=dict(type='bool', default=True),
),
supports_check_mode=True,
)
module.warnings = []
name = module.params['name']
if name == 'cgi' and _run_threaded(module):
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")
if not module.params['identifier']:
module.params['identifier'] = create_apache_identifier(module.params['name'])
if module.params['state'] in ['present', 'absent']:
_set_state(module, module.params['state'])
if __name__ == '__main__':
main()

View File

@ -0,0 +1,380 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
# and apt (Matthew Williams <matthew@flowroute.com>) modules.
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages C(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
available:
description:
- During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
if the currently installed package is no longer available from any repository.
type: bool
default: false
name:
description:
- A package name, like V(foo), or multiple packages, like V(foo,bar).
- Do not include additional whitespace when specifying multiple packages as a string.
Prefer YAML lists over comma-separating multiple package names.
type: list
elements: str
no_cache:
description:
- Do not use any local cache path.
type: bool
default: false
version_added: 1.0.0
repository:
description:
- A package repository or multiple repositories.
Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
type: list
elements: str
state:
description:
- Indicates the desired package(s) state.
- V(present) ensures the package(s) is/are present. V(installed) can be used as an alias.
- V(absent) ensures the package(s) is/are absent. V(removed) can be used as an alias.
- V(latest) ensures the package(s) is/are present and the latest version(s).
default: present
choices: [ "present", "absent", "latest", "installed", "removed" ]
type: str
update_cache:
description:
- Update repository indexes. Can be run with other steps or on its own.
type: bool
default: false
upgrade:
description:
- Upgrade all installed packages to their latest version.
type: bool
default: false
world:
description:
- Use a custom world file when checking for explicitly installed packages.
type: str
default: /etc/apk/world
version_added: 5.4.0
notes:
- 'O(name) and O(upgrade) are mutually exclusive.'
- When used with a C(loop:) each package will be processed individually. It is much more efficient to pass the list directly to the O(name) option.
'''
EXAMPLES = '''
- name: Update repositories and install foo package
community.general.apk:
name: foo
update_cache: true
- name: Update repositories and install foo and bar packages
community.general.apk:
name: foo,bar
update_cache: true
- name: Remove foo package
community.general.apk:
name: foo
state: absent
- name: Remove foo and bar packages
community.general.apk:
name: foo,bar
state: absent
- name: Install the package foo
community.general.apk:
name: foo
state: present
- name: Install the packages foo and bar
community.general.apk:
name: foo,bar
state: present
- name: Update repositories and update package foo to latest version
community.general.apk:
name: foo
state: latest
update_cache: true
- name: Update repositories and update packages foo and bar to latest versions
community.general.apk:
name: foo,bar
state: latest
update_cache: true
- name: Update all installed packages to the latest versions
community.general.apk:
upgrade: true
- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
community.general.apk:
available: true
upgrade: true
- name: Update repositories as a separate step
community.general.apk:
update_cache: true
- name: Install package from a specific repository
community.general.apk:
name: foo
state: latest
update_cache: true
repository: http://dl-3.alpinelinux.org/alpine/edge/main
- name: Install package without using cache
community.general.apk:
name: foo
state: latest
no_cache: true
- name: Install package checking a custom world
community.general.apk:
name: foo
state: latest
world: /etc/apk/world.custom
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when packages have changed
type: list
sample: ['package', 'other-package']
'''
import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
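# apk prints progress lines like '(1/4) Installing foo (1.2-r0)'; capture the package name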
for line in data:
p = regex.search(line)
if p:
packages.append(p.group(1))
return packages
def update_package_db(module, exit):
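"""Run 'apk update'; when exit is true (update_cache was the only action requested), end the module run here."""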
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
elif exit:
module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
else:
return True
def query_toplevel(module, name, world):
# world contains a list of top-level packages separated by ' ' or \n
# packages may contain repository (@) or version (=<>~) separator characters or start with negation !
regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
with open(world) as f:
content = f.read().split()
for p in content:
if regex.search(p):
return True
return False
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"^%s: virtual meta package" % (re.escape(name))
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
if available:
cmd = "%s --available" % cmd
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
if re.search(r'^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
def install_packages(module, names, state, world):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_toplevel(module, name, world):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install + to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
# Check to see if packages are still present because of dependencies
for name in installed:
if query_package(module, name):
rc = 1
break
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name=dict(type='list', elements='str'),
no_cache=dict(default=False, type='bool'),
repository=dict(type='list', elements='str'),
update_cache=dict(default=False, type='bool'),
upgrade=dict(default=False, type='bool'),
available=dict(default=False, type='bool'),
world=dict(default='/etc/apk/world', type='str'),
),
required_one_of=[['name', 'update_cache', 'upgrade']],
mutually_exclusive=[['name', 'upgrade']],
supports_check_mode=True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
if p['no_cache']:
APK_PATH = "%s --no-cache" % (APK_PATH, )
# add repositories to the APK_PATH
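# --repositories-file /dev/null makes the listed repositories replace the system ones instead of supplementing them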
if p['repository']:
for r in p['repository']:
APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module, not p['name'] and not p['upgrade'])
if p['upgrade']:
upgrade_packages(module, p['available'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'], p['world'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
if __name__ == '__main__':
main()

View File

@ -0,0 +1,154 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Mikhail Gordeev
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apt_repo
short_description: Manage APT repositories via apt-repo
description:
- Manages APT repositories using apt-repo tool.
- See U(https://www.altlinux.org/Apt-repo) for details about apt-repo.
notes:
- This module works on ALT-based distros.
- Does NOT support check mode, due to a limitation in the apt-repo tool.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
repo:
description:
- Name of the repository to add or remove.
required: true
type: str
state:
description:
- Indicates the desired repository state.
choices: [ absent, present ]
default: present
type: str
remove_others:
description:
- Remove repositories other than the added one.
- Used if O(state=present)
type: bool
default: false
update:
description:
- Update the package database after changing repositories.
type: bool
default: false
author:
- Mikhail Gordeev (@obirvalger)
'''
EXAMPLES = '''
- name: Remove all repositories
community.general.apt_repo:
repo: all
state: absent
- name: Add repository `Sisyphus` and remove other repositories
community.general.apt_repo:
repo: Sisyphus
state: present
remove_others: true
- name: Add local repository `/space/ALT/Sisyphus` and update package cache
community.general.apt_repo:
repo: copy:///space/ALT/Sisyphus
state: present
update: true
'''
RETURN = ''' # '''
import os
from ansible.module_utils.basic import AnsibleModule
APT_REPO_PATH = "/usr/bin/apt-repo"
def apt_repo(module, *args):
"""run apt-repo with args and return its output"""
# make args list to use in concatenation
args = list(args)
rc, out, err = module.run_command([APT_REPO_PATH] + args)
if rc != 0:
module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
return out
def add_repo(module, repo):
"""add a repository"""
apt_repo(module, 'add', repo)
def rm_repo(module, repo):
"""remove a repository"""
apt_repo(module, 'rm', repo)
def set_repo(module, repo):
"""add a repository and remove other repositories"""
# first add to validate repository
apt_repo(module, 'add', repo)
apt_repo(module, 'rm', 'all')
apt_repo(module, 'add', repo)
def update(module):
"""update package cache"""
apt_repo(module, 'update')
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
remove_others=dict(type='bool', default=False),
update=dict(type='bool', default=False),
),
)
if not os.path.exists(APT_REPO_PATH):
module.fail_json(msg='cannot find /usr/bin/apt-repo')
params = module.params
repo = params['repo']
state = params['state']
old_repositories = apt_repo(module)
if state == 'present':
if params['remove_others']:
set_repo(module, repo)
else:
add_repo(module, repo)
elif state == 'absent':
rm_repo(module, repo)
if params['update']:
update(module)
new_repositories = apt_repo(module)
changed = old_repositories != new_repositories
module.exit_json(changed=changed, repo=repo, state=state)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,361 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <evg@altlinux.org>
# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apt_rpm
short_description: APT-RPM package manager
description:
- Manages packages with C(apt-rpm). Both low-level (C(rpm)) and high-level (C(apt-get)) package manager binaries are required.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
package:
description:
- List of packages to install, upgrade, or remove.
- Since community.general 8.0.0, may include paths to local C(.rpm) files
if O(state=installed) or O(state=present), requires C(rpm) python
module.
aliases: [ name, pkg ]
type: list
elements: str
state:
description:
- Indicates the desired package state.
- Please note that V(present) and V(installed) are equivalent to V(latest) right now.
This will change in the future. To simply ensure that a package is installed, without upgrading
it, use the V(present_not_latest) state.
- The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0.
choices:
- absent
- present
- present_not_latest
- installed
- removed
- latest
default: present
type: str
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
- Default is not to update the cache.
type: bool
default: false
clean:
description:
- Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
- Can be run as part of the package installation (clean runs before install) or as a separate step.
type: bool
default: false
version_added: 6.5.0
dist_upgrade:
description:
- If true performs an C(apt-get dist-upgrade) to upgrade the system.
type: bool
default: false
version_added: 6.5.0
update_kernel:
description:
- If true performs an C(update-kernel) to upgrade kernel packages.
type: bool
default: false
version_added: 6.5.0
requirements:
- C(rpm) python package (rpm bindings), optional. Required if O(package)
option includes local files.
author:
- Evgenii Terechkov (@evgkrsk)
'''
EXAMPLES = '''
- name: Install package foo
community.general.apt_rpm:
pkg: foo
state: present
- name: Install packages foo and bar
community.general.apt_rpm:
pkg:
- foo
- bar
state: present
- name: Remove package foo
community.general.apt_rpm:
pkg: foo
state: absent
- name: Remove packages foo and bar
community.general.apt_rpm:
pkg: foo,bar
state: absent
# bar will be updated if a newer version exists
- name: Update the package database and install bar
community.general.apt_rpm:
name: bar
state: present
update_cache: true
- name: Run the equivalent of "apt-get clean" as a separate step
community.general.apt_rpm:
clean: true
- name: Perform cache update and complete system upgrade (includes kernel)
community.general.apt_rpm:
update_cache: true
dist_upgrade: true
update_kernel: true
'''
import os
import re
import traceback
from ansible.module_utils.basic import (
AnsibleModule,
missing_required_lib,
)
from ansible.module_utils.common.text.converters import to_native
try:
import rpm
except ImportError:
HAS_RPM_PYTHON = False
RPM_PYTHON_IMPORT_ERROR = traceback.format_exc()
else:
HAS_RPM_PYTHON = True
RPM_PYTHON_IMPORT_ERROR = None
APT_CACHE = "/usr/bin/apt-cache"
APT_PATH = "/usr/bin/apt-get"
RPM_PATH = "/usr/bin/rpm"
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
UPDATE_KERNEL_ZERO = "\nTry to install new kernel "
def local_rpm_package_name(path):
"""return package name of a local rpm passed in.
Inspired by ansible.builtin.yum"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
except rpm.error as e:
return None
finally:
os.close(fd)
return to_native(header[rpm.RPMTAG_NAME])
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command([RPM_PATH, "-q", name])
if rc == 0:
return True
else:
return False
def check_package_version(module, name):
# compare installed and candidate version
# if newest version already installed return True
# otherwise return False
rc, out, err = module.run_command([APT_CACHE, "policy", name], environ_update={"LANG": "C"})
installed = re.split("\n |: ", out)[2]
candidate = re.split("\n |: ", out)[4]
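# NOTE: plain string comparison of the two version fields, not full RPM version ordering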
if installed >= candidate:
return True
return False
def query_package_provides(module, name, allow_upgrade=False):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
if name.endswith('.rpm'):
# Likely a local RPM file
if not HAS_RPM_PYTHON:
module.fail_json(
msg=missing_required_lib('rpm'),
exception=RPM_PYTHON_IMPORT_ERROR,
)
name = local_rpm_package_name(name)
rc, out, err = module.run_command([RPM_PATH, "-q", "--provides", name])
if rc == 0:
if not allow_upgrade:
return True
if check_package_version(module, name):
return True
return False
def update_package_db(module):
rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"})
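# a cache refresh alone is never reported as a change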
return (False, update_out)
def dir_size(module, path):
total_size = 0
for path, dirs, files in os.walk(path):
for f in files:
total_size += os.path.getsize(os.path.join(path, f))
return total_size
def clean(module):
t = dir_size(module, "/var/cache/apt/archives")
rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True)
return (t != dir_size(module, "/var/cache/apt/archives"), out)
def dist_upgrade(module):
rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"})
return (APT_GET_ZERO not in out, out)
def update_kernel(module):
rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"})
return (UPDATE_KERNEL_ZERO not in out, out)
def remove_packages(module, packages):
if packages is None:
return (False, "Empty package list")
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc, out, err = module.run_command([APT_PATH, "-y", "remove", package], environ_update={"LANG": "C"})
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
if remove_c > 0:
return (True, "removed %s package(s)" % remove_c)
return (False, "package(s) already absent")
def install_packages(module, pkgspec, allow_upgrade=False):
if pkgspec is None:
return (False, "Empty package list")
packages = []
for package in pkgspec:
if not query_package_provides(module, package, allow_upgrade=allow_upgrade):
packages.append(package)
if packages:
command = [APT_PATH, "-y", "install"] + packages
rc, out, err = module.run_command(command, environ_update={"LANG": "C"})
installed = True
for package in pkgspec:
if not query_package_provides(module, package, allow_upgrade=False):
installed = False
# apt-rpm always have 0 for exit code if --force is used
if rc or not installed:
module.fail_json(msg="'%s' failed: %s" % (" ".join(command), err))
else:
return (True, "%s present(s)" % packages)
else:
return (False, "Nothing to install")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed', 'present_not_latest', 'latest']),
update_cache=dict(type='bool', default=False),
clean=dict(type='bool', default=False),
dist_upgrade=dict(type='bool', default=False),
update_kernel=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['name', 'pkg']),
),
)
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
if p['state'] in ['installed', 'present']:
module.deprecate(
'state=%s currently behaves unexpectedly by always upgrading to the latest version if'
' the package is already installed. This behavior is deprecated and will change in'
' community.general 11.0.0. You can use state=latest to explicitly request this behavior'
' or state=present_not_latest to explicitly request the behavior that state=%s will have'
' in community.general 11.0.0, namely that the package will not be upgraded if it is'
' already installed.' % (p['state'], p['state']),
version='11.0.0',
collection_name='community.general',
)
modified = False
output = ""
if p['update_cache']:
update_package_db(module)
if p['clean']:
(m, out) = clean(module)
modified = modified or m
if p['dist_upgrade']:
(m, out) = dist_upgrade(module)
modified = modified or m
output += out
if p['update_kernel']:
(m, out) = update_kernel(module)
modified = modified or m
output += out
packages = p['package']
if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']:
(m, out) = install_packages(module, packages, allow_upgrade=p['state'] != 'present_not_latest')
modified = modified or m
output += out
if p['state'] in ['absent', 'removed']:
(m, out) = remove_packages(module, packages)
modified = modified or m
output += out
# Return total modification status and output of all commands
module.exit_json(changed=modified, msg=output)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,685 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Ben Doherty <bendohmv@gmail.com>
# Sponsored by Oomph, Inc. http://www.oomphinc.com
# Copyright (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: archive
short_description: Creates a compressed archive of one or more files or trees
extends_documentation_fragment:
- files
- community.general.attributes
description:
- Creates or extends an archive.
- The source and archive are on the remote host, and the archive I(is not) copied to the local host.
- Source files can be deleted after archival by specifying O(remove=True).
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
type: list
elements: path
required: true
format:
description:
- The type of compression to use.
type: str
choices: [ bz2, gz, tar, xz, zip ]
default: gz
dest:
description:
- The file name of the destination archive. The parent directory must exist on the remote host.
- This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
- If the destination archive already exists, it will be truncated and overwritten.
type: path
exclude_path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to exclude from O(path) list and glob expansion.
- Use O(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the O(path) list.
type: list
elements: path
default: []
exclusion_patterns:
description:
- Glob style patterns to exclude files or directories from the resulting archive.
- This differs from O(exclude_path) which applies only to the source paths from O(path).
type: list
elements: path
version_added: 3.2.0
force_archive:
description:
- Allows you to force the module to treat this as an archive even if only a single file is specified.
- By default when a single file is specified it is compressed only (not archived).
- Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module.
type: bool
default: false
remove:
description:
- Remove any added source files and trees after adding to archive.
type: bool
default: false
notes:
- Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives.
- This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives.
These are part of the Python standard library for Python 2 and 3.
requirements:
- Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format.
seealso:
- module: ansible.builtin.unarchive
author:
- Ben Doherty (@bendoh)
'''
EXAMPLES = r'''
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
community.general.archive:
path: /path/to/foo
dest: /path/to/foo.tgz
- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
community.general.archive:
path: /path/to/foo
remove: true
- name: Create a zip archive of /path/to/foo
community.general.archive:
path: /path/to/foo
format: zip
- name: Create a bz2 archive of multiple files, rooted at /path
community.general.archive:
path:
- /path/to/foo
- /path/wong/foo
dest: /path/file.tar.bz2
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
community.general.archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/bar
- /path/to/foo/baz
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
community.general.archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/ba*
format: bz2
- name: Use gzip to compress a single file (i.e. do not archive it first with tar)
community.general.archive:
path: /path/to/foo/single.file
dest: /path/file.gz
format: gz
- name: Create a tar.gz archive of a single file.
community.general.archive:
path: /path/to/foo/single.file
dest: /path/file.tar.gz
format: gz
force_archive: true
'''
RETURN = r'''
state:
description:
The state of the input O(path).
type: str
returned: always
dest_state:
description:
- The state of the O(dest) file.
- V(absent) when the file does not exist.
- V(archive) when the file is an archive.
- V(compress) when the file is compressed, but not an archive.
- V(incomplete) when the file is an archive, but some files under O(path) were not found.
type: str
returned: success
version_added: 3.4.0
missing:
description: Any files that were missing from the source.
type: list
returned: success
archived:
description: Any files that were compressed or added to the archive.
type: list
returned: success
arcroot:
description: The archive root.
type: str
returned: always
expanded_paths:
description: The list of matching paths from paths argument.
type: list
returned: always
expanded_exclude_paths:
description: The list of matching exclude paths from the exclude_path argument.
type: list
returned: always
'''
import abc
import bz2
import glob
import gzip
import io
import os
import re
import shutil
import tarfile
import zipfile
from fnmatch import fnmatch
from sys import version_info
from traceback import format_exc
from zlib import crc32
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils import six
try: # python 3.2+
from zipfile import BadZipFile # type: ignore[attr-defined]
except ImportError: # older python
from zipfile import BadZipfile as BadZipFile
LZMA_IMP_ERR = None
if six.PY3:
try:
import lzma
HAS_LZMA = True
except ImportError:
LZMA_IMP_ERR = format_exc()
HAS_LZMA = False
else:
try:
from backports import lzma
HAS_LZMA = True
except ImportError:
LZMA_IMP_ERR = format_exc()
HAS_LZMA = False
PY27 = version_info[0:2] >= (2, 7)
STATE_ABSENT = 'absent'
STATE_ARCHIVED = 'archive'
STATE_COMPRESSED = 'compress'
STATE_INCOMPLETE = 'incomplete'
def common_path(paths):
empty = b'' if paths and isinstance(paths[0], six.binary_type) else ''
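# Join an empty component so every parent path ends with a separator; commonprefix then cannot cut a path mid-name.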
return os.path.join(
os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty
)
def expand_paths(paths):
expanded_path = []
is_globby = False
for path in paths:
b_path = _to_bytes(path)
if b'*' in b_path or b'?' in b_path:
e_paths = glob.glob(b_path)
is_globby = True
else:
e_paths = [b_path]
expanded_path.extend(e_paths)
return expanded_path, is_globby
def matches_exclusion_patterns(path, exclusion_patterns):
return any(fnmatch(path, p) for p in exclusion_patterns)
def is_archive(path):
return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE)
def strip_prefix(prefix, string):
return string[len(prefix):] if string.startswith(prefix) else string
def _to_bytes(s):
return to_bytes(s, errors='surrogate_or_strict')
def _to_native(s):
return to_native(s, errors='surrogate_or_strict')
def _to_native_ascii(s):
return to_native(s, errors='surrogate_or_strict', encoding='ascii')
@six.add_metaclass(abc.ABCMeta)
class Archive(object):
def __init__(self, module):
self.module = module
self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None
self.exclusion_patterns = module.params['exclusion_patterns'] or []
self.format = module.params['format']
self.must_archive = module.params['force_archive']
self.remove = module.params['remove']
self.changed = False
self.destination_state = STATE_ABSENT
self.errors = []
self.file = None
self.successes = []
self.targets = []
self.not_found = []
paths = module.params['path']
self.expanded_paths, has_globs = expand_paths(paths)
self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0]
self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths))
if not self.paths:
module.fail_json(
path=', '.join(paths),
expanded_paths=_to_native(b', '.join(self.expanded_paths)),
expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)),
msg='Error, no source paths were found'
)
self.root = common_path(self.paths)
if not self.must_archive:
self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1])
if not self.destination and not self.must_archive:
self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format))
if self.must_archive and not self.destination:
module.fail_json(
dest=_to_native(self.destination),
path=', '.join(paths),
msg='Error, must specify "dest" when archiving multiple files or trees'
)
if self.remove:
self._check_removal_safety()
self.original_checksums = self.destination_checksums()
self.original_size = self.destination_size()
def add(self, path, archive_name):
try:
self._add(_to_native_ascii(path), _to_native(archive_name))
if self.contains(_to_native(archive_name)):
self.successes.append(path)
except Exception as e:
self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e)))
def add_single_target(self, path):
if self.format in ('zip', 'tar'):
self.open()
self.add(path, strip_prefix(self.root, path))
self.close()
self.destination_state = STATE_ARCHIVED
else:
try:
f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb')
with open(path, 'rb') as f_in:
shutil.copyfileobj(f_in, f_out)
f_out.close()
self.successes.append(path)
self.destination_state = STATE_COMPRESSED
except (IOError, OSError) as e:
self.module.fail_json(
path=_to_native(path),
dest=_to_native(self.destination),
msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc()
)
def add_targets(self):
self.open()
try:
for target in self.targets:
if os.path.isdir(target):
for directory_path, directory_names, file_names in os.walk(target, topdown=True):
for directory_name in directory_names:
full_path = os.path.join(directory_path, directory_name)
self.add(full_path, strip_prefix(self.root, full_path))
for file_name in file_names:
full_path = os.path.join(directory_path, file_name)
self.add(full_path, strip_prefix(self.root, full_path))
else:
self.add(target, strip_prefix(self.root, target))
except Exception as e:
if self.format in ('zip', 'tar'):
archive_format = self.format
else:
archive_format = 'tar.' + self.format
self.module.fail_json(
msg='Error when writing %s archive at %s: %s' % (
archive_format, _to_native(self.destination), _to_native(e)
),
exception=format_exc()
)
self.close()
if self.errors:
self.module.fail_json(
msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors))
)
def is_different_from_original(self):
if self.original_checksums is None:
return self.original_size != self.destination_size()
else:
return self.original_checksums != self.destination_checksums()
def destination_checksums(self):
if self.destination_exists() and self.destination_readable():
return self._get_checksums(self.destination)
return None
def destination_exists(self):
return self.destination and os.path.exists(self.destination)
def destination_readable(self):
return self.destination and os.access(self.destination, os.R_OK)
def destination_size(self):
return os.path.getsize(self.destination) if self.destination_exists() else 0
def find_targets(self):
for path in self.paths:
if not os.path.lexists(path):
self.not_found.append(path)
else:
self.targets.append(path)
def has_targets(self):
return bool(self.targets)
def has_unfound_targets(self):
return bool(self.not_found)
def remove_single_target(self, path):
try:
os.remove(path)
except OSError as e:
self.module.fail_json(
path=_to_native(path),
msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc()
)
def remove_targets(self):
for path in self.successes:
if os.path.exists(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
self.errors.append(_to_native(path))
for path in self.paths:
try:
if os.path.isdir(path):
shutil.rmtree(path)
except OSError:
self.errors.append(_to_native(path))
if self.errors:
self.module.fail_json(
dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors
)
def update_permissions(self):
file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination)
self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed)
@property
def result(self):
return {
'archived': [_to_native(p) for p in self.successes],
'dest': _to_native(self.destination),
'dest_state': self.destination_state,
'changed': self.changed,
'arcroot': _to_native(self.root),
'missing': [_to_native(p) for p in self.not_found],
'expanded_paths': [_to_native(p) for p in self.expanded_paths],
'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths],
}
def _check_removal_safety(self):
for path in self.paths:
if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')):
self.module.fail_json(
path=b', '.join(self.paths),
msg='Error, created archive can not be contained in source paths when remove=true'
)
def _open_compressed_file(self, path, mode):
f = None
if self.format == 'gz':
f = gzip.open(path, mode)
elif self.format == 'bz2':
f = bz2.BZ2File(path, mode)
elif self.format == 'xz':
f = lzma.LZMAFile(path, mode)
else:
self.module.fail_json(msg="%s is not a valid format" % self.format)
return f
@abc.abstractmethod
def close(self):
pass
@abc.abstractmethod
def contains(self, name):
pass
@abc.abstractmethod
def open(self):
pass
@abc.abstractmethod
def _add(self, path, archive_name):
pass
@abc.abstractmethod
def _get_checksums(self, path):
pass
class ZipArchive(Archive):
def __init__(self, module):
super(ZipArchive, self).__init__(module)
def close(self):
self.file.close()
def contains(self, name):
try:
self.file.getinfo(name)
except KeyError:
return False
return True
def open(self):
self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True)
def _add(self, path, archive_name):
if not matches_exclusion_patterns(path, self.exclusion_patterns):
self.file.write(path, archive_name)
def _get_checksums(self, path):
try:
archive = zipfile.ZipFile(_to_native_ascii(path), 'r')
checksums = set((info.filename, info.CRC) for info in archive.infolist())
archive.close()
except BadZipFile:
checksums = set()
return checksums
class TarArchive(Archive):
def __init__(self, module):
super(TarArchive, self).__init__(module)
self.fileIO = None
def close(self):
self.file.close()
if self.format == 'xz':
with lzma.open(_to_native(self.destination), 'wb') as f:
f.write(self.fileIO.getvalue())
self.fileIO.close()
def contains(self, name):
try:
self.file.getmember(name)
except KeyError:
return False
return True
def open(self):
if self.format in ('gz', 'bz2'):
self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format)
# python3 tarfile module allows xz format but for python2 we have to create the tarfile
# in memory and then compress it with lzma.
elif self.format == 'xz':
self.fileIO = io.BytesIO()
self.file = tarfile.open(fileobj=self.fileIO, mode='w')
elif self.format == 'tar':
self.file = tarfile.open(_to_native_ascii(self.destination), 'w')
else:
self.module.fail_json(msg="%s is not a valid archive format" % self.format)
def _add(self, path, archive_name):
def py27_filter(tarinfo):
return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo
def py26_filter(path):
return matches_exclusion_patterns(path, self.exclusion_patterns)
if PY27:
self.file.add(path, archive_name, recursive=False, filter=py27_filter)
else:
self.file.add(path, archive_name, recursive=False, exclude=py26_filter)
def _get_checksums(self, path):
if HAS_LZMA:
LZMAError = lzma.LZMAError
else:
# Just picking another exception that's also listed below
LZMAError = tarfile.ReadError
try:
if self.format == 'xz':
with lzma.open(_to_native_ascii(path), 'r') as f:
archive = tarfile.open(fileobj=f)
checksums = set((info.name, info.chksum) for info in archive.getmembers())
archive.close()
else:
archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format)
checksums = set((info.name, info.chksum) for info in archive.getmembers())
archive.close()
except (LZMAError, tarfile.ReadError, tarfile.CompressionError):
try:
# The python implementations of gzip, bz2, and lzma do not support restoring compressed files
# to their original names so only file checksum is returned
f = self._open_compressed_file(_to_native_ascii(path), 'r')
checksum = 0
while True:
chunk = f.read(16 * 1024 * 1024)
if not chunk:
break
checksum = crc32(chunk, checksum)
checksums = set([(b'', checksum)])
f.close()
except Exception:
checksums = set()
return checksums
def get_archive(module):
if module.params['format'] == 'zip':
return ZipArchive(module)
else:
return TarArchive(module)
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='list', elements='path', required=True),
format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
dest=dict(type='path'),
exclude_path=dict(type='list', elements='path', default=[]),
exclusion_patterns=dict(type='list', elements='path'),
force_archive=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
),
add_file_common_args=True,
supports_check_mode=True,
)
if not HAS_LZMA and module.params['format'] == 'xz':
module.fail_json(
msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR
)
check_mode = module.check_mode
archive = get_archive(module)
archive.find_targets()
if not archive.has_targets():
if archive.destination_exists():
archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED
elif archive.has_targets() and archive.must_archive:
if check_mode:
archive.changed = True
else:
archive.add_targets()
archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED
archive.changed |= archive.is_different_from_original()
if archive.remove:
archive.remove_targets()
else:
if check_mode:
if not archive.destination_exists():
archive.changed = True
else:
path = archive.paths[0]
archive.add_single_target(path)
archive.changed |= archive.is_different_from_original()
if archive.remove:
archive.remove_single_target(path)
if archive.destination_exists():
archive.update_permissions()
module.exit_json(**archive.result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,216 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: atomic_container
short_description: Manage the containers on the atomic host platform
description:
- Manage the containers on the atomic host platform.
  - Allows managing the lifecycle of a container on the atomic host platform.
author: "Giuseppe Scrivano (@giuseppe)"
notes:
  - Host should support the C(atomic) command.
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
backend:
description:
- Define the backend to use for the container.
required: true
choices: ["docker", "ostree"]
type: str
name:
description:
- Name of the container.
required: true
type: str
image:
description:
- The image to use to install the container.
required: true
type: str
rootfs:
description:
- Define the rootfs of the image.
type: str
state:
description:
- State of the container.
choices: ["absent", "latest", "present", "rollback"]
default: "latest"
type: str
mode:
description:
      - Define whether it is a user or a system container.
choices: ["user", "system"]
type: str
values:
description:
- Values for the installation of the container.
      - This option is permitted only with O(mode=user) or O(mode=system).
      - The values specified here are used at installation time as C(--set) arguments for C(atomic install).
type: list
elements: str
default: []
'''
EXAMPLES = r'''
- name: Install the etcd system container
community.general.atomic_container:
name: etcd
image: rhel/etcd
backend: ostree
state: latest
mode: system
values:
- ETCD_NAME=etcd.server
- name: Uninstall the etcd system container
community.general.atomic_container:
name: etcd
image: rhel/etcd
backend: ostree
state: absent
mode: system
'''
RETURN = r'''
msg:
description: The command standard output
returned: always
type: str
sample: 'Using default tag: latest ...'
'''
# import module snippets
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def do_install(module, mode, rootfs, container, image, values_list, backend):
system_list = ["--system"] if mode == 'system' else []
user_list = ["--user"] if mode == 'user' else []
rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
atomic_bin = module.get_bin_path('atomic')
args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
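        # "Extracting" or "Copying blob" in the output indicates image data
        # was actually pulled, so use it as the change indicator.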
changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=out, changed=changed)
def do_update(module, container, image, values_list):
atomic_bin = module.get_bin_path('atomic')
args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=out, changed=changed)
def do_uninstall(module, name, backend):
atomic_bin = module.get_bin_path('atomic')
args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
module.exit_json(msg=out, changed=True)
def do_rollback(module, name):
atomic_bin = module.get_bin_path('atomic')
args = [atomic_bin, 'containers', 'rollback', name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Rolling back" in out
module.exit_json(msg=out, changed=changed)
def core(module):
mode = module.params['mode']
name = module.params['name']
image = module.params['image']
rootfs = module.params['rootfs']
values = module.params['values']
backend = module.params['backend']
state = module.params['state']
atomic_bin = module.get_bin_path('atomic')
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
values_list = ["--set=%s" % x for x in values] if values else []
args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return
present = name in out
if state == 'present' and present:
module.exit_json(msg=out, changed=False)
elif (state in ['latest', 'present']) and not present:
do_install(module, mode, rootfs, name, image, values_list, backend)
elif state == 'latest':
do_update(module, name, image, values_list)
elif state == 'absent':
if not present:
module.exit_json(msg="The container is not present", changed=False)
else:
do_uninstall(module, name, backend)
elif state == 'rollback':
do_rollback(module, name)
def main():
module = AnsibleModule(
argument_spec=dict(
mode=dict(choices=['user', 'system']),
name=dict(required=True),
image=dict(required=True),
rootfs=dict(),
state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
backend=dict(required=True, choices=['docker', 'ostree']),
values=dict(type='list', default=[], elements='str'),
),
)
    if module.params['values'] and module.params['mode'] is None:
        module.fail_json(msg="values is supported only with user or system mode")
# Verify that the platform supports atomic command
dummy = module.get_bin_path('atomic', required=True)
try:
core(module)
except Exception as e:
module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View File

@ -0,0 +1,104 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: atomic_host
short_description: Manage the atomic host platform
description:
- Manage the atomic host platform.
- Rebooting of Atomic host platform should be done outside this module.
author:
- Saravanan KR (@krsacme)
notes:
- Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
revision:
description:
- The version number of the atomic host to be deployed.
- Providing V(latest) will upgrade to the latest available version.
default: 'latest'
aliases: [ version ]
type: str
'''
EXAMPLES = r'''
- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
community.general.atomic_host:
revision: latest
- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
community.general.atomic_host:
revision: 23.130
'''
RETURN = r'''
msg:
description: The command standard output
returned: always
type: str
sample: 'Already on latest'
'''
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def core(module):
revision = module.params['revision']
atomic_bin = module.get_bin_path('atomic', required=True)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
if revision == 'latest':
args = [atomic_bin, 'host', 'upgrade']
else:
args = [atomic_bin, 'host', 'deploy', revision]
rc, out, err = module.run_command(args, check_rc=False)
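    # `atomic host upgrade` exits with status 77 when the host is already on
    # the latest deployment.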
if rc == 77 and revision == 'latest':
module.exit_json(msg="Already on latest", changed=False)
elif rc != 0:
module.fail_json(rc=rc, msg=err)
else:
module.exit_json(msg=out, changed=True)
def main():
module = AnsibleModule(
argument_spec=dict(
revision=dict(type='str', default='latest', aliases=["version"]),
),
)
# Verify that the platform is atomic host
if not os.path.exists("/run/ostree-booted"):
module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View File

@ -0,0 +1,176 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: atomic_image
short_description: Manage the container images on the atomic host platform
description:
- Manage the container images on the atomic host platform.
  - Allows executing the commands specified by the RUN label in the container image when present.
author:
- Saravanan KR (@krsacme)
notes:
- Host should support C(atomic) command.
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
backend:
description:
- Define the backend where the image is pulled.
choices: [ 'docker', 'ostree' ]
type: str
name:
description:
- Name of the container image.
required: true
type: str
state:
description:
- The state of the container image.
      - The state V(latest) ensures the container image is upgraded to the latest version and forcefully restarts the container, if running.
choices: [ 'absent', 'latest', 'present' ]
default: 'latest'
type: str
started:
description:
      - Start or stop the container.
type: bool
default: true
'''
EXAMPLES = r'''
- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
community.general.atomic_image:
name: rhel7/rsyslog
state: latest
- name: Pull busybox to the OSTree backend
community.general.atomic_image:
name: busybox
state: latest
backend: ostree
'''
RETURN = r'''
msg:
description: The command standard output
returned: always
type: str
sample: 'Using default tag: latest ...'
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def do_upgrade(module, image):
atomic_bin = module.get_bin_path('atomic')
args = [atomic_bin, 'update', '--force', image]
rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:  # something went wrong, emit the msg
module.fail_json(rc=rc, msg=err)
elif 'Image is up to date' in out:
return False
return True
def core(module):
image = module.params['name']
state = module.params['state']
started = module.params['started']
backend = module.params['backend']
is_upgraded = False
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
atomic_bin = module.get_bin_path('atomic')
out = {}
err = {}
rc = 0
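    # A backend selects the storage-aware pull/run/delete commands; without
    # one, fall back to the legacy run/install/uninstall flow below.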
if backend:
if state == 'present' or state == 'latest':
args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
rc, out, err = module.run_command(args, check_rc=False)
            if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
out_run = ""
if started:
args = [atomic_bin, 'run', "--storage=%s" % backend, image]
rc, out_run, err = module.run_command(args, check_rc=False)
                    if rc != 0:
module.fail_json(rc=rc, msg=err)
changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=(out + out_run), changed=changed)
elif state == 'absent':
args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
rc, out, err = module.run_command(args, check_rc=False)
if rc < 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Unable to find" not in out
module.exit_json(msg=out, changed=changed)
return
if state == 'present' or state == 'latest':
if state == 'latest':
is_upgraded = do_upgrade(module, image)
if started:
args = [atomic_bin, 'run', image]
else:
args = [atomic_bin, 'install', image]
elif state == 'absent':
args = [atomic_bin, 'uninstall', image]
rc, out, err = module.run_command(args, check_rc=False)
if rc < 0:
module.fail_json(rc=rc, msg=err)
elif rc == 1 and 'already present' in err:
        module.exit_json(result=err, changed=is_upgraded)
elif started and 'Container is running' in out:
module.exit_json(result=out, changed=is_upgraded)
else:
module.exit_json(msg=out, changed=True)
def main():
module = AnsibleModule(
argument_spec=dict(
backend=dict(type='str', choices=['docker', 'ostree']),
name=dict(type='str', required=True),
state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
started=dict(type='bool', default=True),
),
)
# Verify that the platform supports atomic command
dummy = module.get_bin_path('atomic', required=True)
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View File

@ -0,0 +1,164 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Ted Trask <ttrask01@yahoo.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: awall
short_description: Manage awall policies
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
description:
  - This module allows enabling, disabling, and activating C(awall) policies.
- Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files
and activates the configuration on the system.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- One or more policy names.
type: list
elements: str
state:
description:
- Whether the policies should be enabled or disabled.
type: str
choices: [ disabled, enabled ]
default: enabled
activate:
description:
- Activate the new firewall rules.
- Can be run with other steps or on its own.
- Idempotency is affected if O(activate=true), as the module will always report a changed state.
type: bool
default: false
notes:
- At least one of O(name) and O(activate) is required.
'''
EXAMPLES = r'''
- name: Enable "foo" and "bar" policy
community.general.awall:
name: [ foo bar ]
state: enabled
- name: Disable "foo" and "bar" policy and activate new rules
community.general.awall:
name:
- foo
- bar
state: disabled
activate: false
- name: Activate currently enabled firewall rules
community.general.awall:
activate: true
'''
RETURN = ''' # '''
import re
from ansible.module_utils.basic import AnsibleModule
def activate(module):
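    # --force skips awall's interactive confirmation step so activation can
    # run unattended.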
cmd = "%s activate --force" % (AWALL_PATH)
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
return True
else:
module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
def is_policy_enabled(module, name):
cmd = "%s list" % (AWALL_PATH)
rc, stdout, stderr = module.run_command(cmd)
if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
return True
return False
def enable_policy(module, names, act):
policies = []
for name in names:
if not is_policy_enabled(module, name):
policies.append(name)
if not policies:
module.exit_json(changed=False, msg="policy(ies) already enabled")
names = " ".join(policies)
if module.check_mode:
cmd = "%s list" % (AWALL_PATH)
else:
cmd = "%s enable %s" % (AWALL_PATH, names)
rc, stdout, stderr = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
if act and not module.check_mode:
activate(module)
module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
def disable_policy(module, names, act):
policies = []
for name in names:
if is_policy_enabled(module, name):
policies.append(name)
if not policies:
module.exit_json(changed=False, msg="policy(ies) already disabled")
names = " ".join(policies)
if module.check_mode:
cmd = "%s list" % (AWALL_PATH)
else:
cmd = "%s disable %s" % (AWALL_PATH, names)
rc, stdout, stderr = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
if act and not module.check_mode:
activate(module)
module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
name=dict(type='list', elements='str'),
activate=dict(type='bool', default=False),
),
required_one_of=[['name', 'activate']],
supports_check_mode=True,
)
global AWALL_PATH
AWALL_PATH = module.get_bin_path('awall', required=True)
p = module.params
if p['name']:
if p['state'] == 'enabled':
enable_policy(module, p['name'], p['activate'])
elif p['state'] == 'disabled':
disable_policy(module, p['name'], p['activate'])
if p['activate']:
if not module.check_mode:
activate(module)
module.exit_json(changed=True, msg="activated awall rules")
module.fail_json(msg="no action defined")
if __name__ == '__main__':
main()

View File

@ -0,0 +1,415 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: beadm
short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
description:
- Create, delete or activate ZFS boot environments.
- Mount and unmount ZFS boot environments.
author: Adam Števko (@xen0l)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- ZFS boot environment name.
type: str
required: true
aliases: [ "be" ]
snapshot:
description:
- If specified, the new boot environment will be cloned from the given
snapshot or inactive boot environment.
type: str
description:
description:
- Associate a description with a new boot environment. This option is
available only on Solarish platforms.
type: str
options:
description:
- Create the datasets for new BE with specific ZFS properties.
- Multiple options can be specified.
- This option is available only on Solarish platforms.
type: str
mountpoint:
description:
- Path where to mount the ZFS boot environment.
type: path
state:
description:
- Create or delete ZFS boot environment.
type: str
choices: [ absent, activated, mounted, present, unmounted ]
default: present
force:
description:
- Specifies if the unmount should be forced.
type: bool
default: false
'''
EXAMPLES = r'''
- name: Create ZFS boot environment
community.general.beadm:
name: upgrade-be
state: present
- name: Create ZFS boot environment from existing inactive boot environment
community.general.beadm:
name: upgrade-be
snapshot: be@old
state: present
- name: Create ZFS boot environment with compression enabled and description "upgrade"
community.general.beadm:
name: upgrade-be
options: "compression=on"
description: upgrade
state: present
- name: Delete ZFS boot environment
community.general.beadm:
name: old-be
state: absent
- name: Mount ZFS boot environment on /tmp/be
community.general.beadm:
name: BE
mountpoint: /tmp/be
state: mounted
- name: Unmount ZFS boot environment
community.general.beadm:
name: BE
state: unmounted
- name: Activate ZFS boot environment
community.general.beadm:
name: upgrade-be
state: activated
'''
RETURN = r'''
name:
description: BE name
returned: always
type: str
sample: pre-upgrade
snapshot:
description: ZFS snapshot to create BE from
returned: always
type: str
sample: rpool/ROOT/oi-hipster@fresh
description:
description: BE description
returned: always
type: str
sample: Upgrade from 9.0 to 10.0
options:
description: BE additional options
returned: always
type: str
sample: compression=on
mountpoint:
description: BE mountpoint
returned: always
type: str
sample: /mnt/be
state:
description: state of the target
returned: always
type: str
sample: present
force:
description: If forced action is wanted
returned: always
type: bool
sample: false
'''
import os
from ansible.module_utils.basic import AnsibleModule
class BE(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.snapshot = module.params['snapshot']
self.description = module.params['description']
self.options = module.params['options']
self.mountpoint = module.params['mountpoint']
self.state = module.params['state']
self.force = module.params['force']
self.is_freebsd = os.uname()[0] == 'FreeBSD'
def _beadm_list(self):
cmd = [self.module.get_bin_path('beadm'), 'list', '-H']
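        # -H makes the output machine-parsable (semicolon-separated fields on
        # Solarish systems, whitespace-separated on FreeBSD); -s is needed to
        # include snapshots when the BE name refers to one.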
if '@' in self.name:
cmd.append('-s')
return self.module.run_command(cmd)
def _find_be_by_name(self, out):
if '@' in self.name:
for line in out.splitlines():
if self.is_freebsd:
check = line.split()
if check == []:
continue
full_name = check[0].split('/')
if full_name == []:
continue
                    check[0] = full_name[-1]
if check[0] == self.name:
return check
else:
check = line.split(';')
if check[0] == self.name:
return check
else:
for line in out.splitlines():
if self.is_freebsd:
check = line.split()
if check[0] == self.name:
return check
else:
check = line.split(';')
if check[0] == self.name:
return check
return None
def exists(self):
(rc, out, dummy) = self._beadm_list()
if rc == 0:
if self._find_be_by_name(out):
return True
else:
return False
else:
return False
def is_activated(self):
(rc, out, dummy) = self._beadm_list()
if rc == 0:
line = self._find_be_by_name(out)
if line is None:
return False
if self.is_freebsd:
if 'R' in line[1]:
return True
else:
if 'R' in line[2]:
return True
return False
def activate_be(self):
cmd = [self.module.get_bin_path('beadm'), 'activate', self.name]
return self.module.run_command(cmd)
def create_be(self):
cmd = [self.module.get_bin_path('beadm'), 'create']
if self.snapshot:
cmd.extend(['-e', self.snapshot])
if not self.is_freebsd:
if self.description:
cmd.extend(['-d', self.description])
if self.options:
cmd.extend(['-o', self.options])
cmd.append(self.name)
return self.module.run_command(cmd)
def destroy_be(self):
cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name]
return self.module.run_command(cmd)
def is_mounted(self):
(rc, out, dummy) = self._beadm_list()
if rc == 0:
line = self._find_be_by_name(out)
if line is None:
return False
if self.is_freebsd:
# On FreeBSD, we exclude currently mounted BE on /, as it is
# special and can be activated even if it is mounted. That is not
# possible with non-root BEs.
if line[2] != '-' and line[2] != '/':
return True
else:
if line[3]:
return True
return False
def mount_be(self):
cmd = [self.module.get_bin_path('beadm'), 'mount', self.name]
if self.mountpoint:
cmd.append(self.mountpoint)
return self.module.run_command(cmd)
def unmount_be(self):
cmd = [self.module.get_bin_path('beadm'), 'unmount']
if self.force:
cmd.append('-f')
cmd.append(self.name)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['be']),
snapshot=dict(type='str'),
description=dict(type='str'),
options=dict(type='str'),
mountpoint=dict(type='path'),
state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
force=dict(type='bool', default=False),
),
supports_check_mode=True,
)
be = BE(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = be.name
result['state'] = be.state
if be.snapshot:
result['snapshot'] = be.snapshot
if be.description:
result['description'] = be.description
if be.options:
result['options'] = be.options
if be.mountpoint:
result['mountpoint'] = be.mountpoint
if be.state == 'absent':
# beadm on FreeBSD and Solarish systems differs in delete behaviour in
# that we are not allowed to delete activated BE on FreeBSD while on
# Solarish systems we cannot delete BE if it is mounted. We add mount
# check for both platforms as BE should be explicitly unmounted before
# being deleted. On FreeBSD, we also check if the BE is activated.
if be.exists():
if not be.is_mounted():
if module.check_mode:
module.exit_json(changed=True)
if be.is_freebsd:
if be.is_activated():
module.fail_json(msg='Unable to remove active BE!')
(rc, out, err) = be.destroy_be()
if rc != 0:
module.fail_json(msg='Error while destroying BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
else:
module.fail_json(msg='Unable to remove BE as it is mounted!')
elif be.state == 'present':
if not be.exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = be.create_be()
if rc != 0:
module.fail_json(msg='Error while creating BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
elif be.state == 'activated':
if not be.is_activated():
if module.check_mode:
module.exit_json(changed=True)
# On FreeBSD, beadm is unable to activate mounted BEs, so we add
# an explicit check for that case.
if be.is_freebsd:
if be.is_mounted():
module.fail_json(msg='Unable to activate mounted BE!')
(rc, out, err) = be.activate_be()
if rc != 0:
module.fail_json(msg='Error while activating BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
elif be.state == 'mounted':
if not be.is_mounted():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = be.mount_be()
if rc != 0:
module.fail_json(msg='Error while mounting BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
elif be.state == 'unmounted':
if be.is_mounted():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = be.unmount_be()
if rc != 0:
module.fail_json(msg='Error while unmounting BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,175 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: bearychat
short_description: Send BearyChat notifications
description:
- The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
via the Incoming Robot integration.
author: "Jiangge Zhang (@tonyseek)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
url:
type: str
description:
- BearyChat WebHook URL. This authenticates you to the bearychat
service. It looks like
V(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
required: true
text:
type: str
description:
- Message to send.
markdown:
description:
- If V(true), text will be parsed as markdown.
default: true
type: bool
channel:
type: str
description:
- Channel to send the message to. If absent, the message goes to the
default channel selected by the O(url).
attachments:
type: list
elements: dict
description:
- Define a list of attachments. For more information, see
        U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
'''
EXAMPLES = """
- name: Send notification message via BearyChat
local_action:
module: bearychat
url: |
https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
text: "{{ inventory_hostname }} completed"
- name: Send notification message via BearyChat all options
local_action:
module: bearychat
url: |
https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
text: "{{ inventory_hostname }} completed"
markdown: false
channel: "#ansible"
attachments:
- title: "Ansible on {{ inventory_hostname }}"
text: "May the Force be with you."
color: "#ffffff"
images:
- http://example.com/index.png
"""
RETURN = """
msg:
description: execution result
returned: success
type: str
sample: "OK"
"""
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
HAS_URLPARSE = True
except Exception:
HAS_URLPARSE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def build_payload_for_bearychat(module, text, markdown, channel, attachments):
payload = {}
if text is not None:
payload['text'] = text
if markdown is not None:
payload['markdown'] = markdown
if channel is not None:
payload['channel'] = channel
if attachments is not None:
payload.setdefault('attachments', []).extend(
build_payload_for_bearychat_attachment(
module, item.get('title'), item.get('text'), item.get('color'),
item.get('images'))
for item in attachments)
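    # The hook is sent a form-encoded `payload` field holding the JSON
    # document rather than a raw JSON body.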
payload = 'payload=%s' % module.jsonify(payload)
return payload
def build_payload_for_bearychat_attachment(module, title, text, color, images):
attachment = {}
if title is not None:
attachment['title'] = title
if text is not None:
attachment['text'] = text
if color is not None:
attachment['color'] = color
if images is not None:
target_images = attachment.setdefault('images', [])
if not isinstance(images, (list, tuple)):
images = [images]
for image in images:
if isinstance(image, dict) and 'url' in image:
image = {'url': image['url']}
elif hasattr(image, 'startswith') and image.startswith('http'):
image = {'url': image}
else:
module.fail_json(
msg="BearyChat doesn't have support for this kind of "
"attachment image")
target_images.append(image)
return attachment
def do_notify_bearychat(module, url, payload):
response, info = fetch_url(module, url, data=payload)
if info['status'] != 200:
url_info = urlparse(url)
obscured_incoming_webhook = urlunparse(
(url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
module.fail_json(
msg=" failed to send %s to %s: %s" % (
payload, obscured_incoming_webhook, info['msg']))
def main():
module = AnsibleModule(argument_spec={
'url': dict(type='str', required=True, no_log=True),
'text': dict(type='str'),
'markdown': dict(default=True, type='bool'),
'channel': dict(type='str'),
'attachments': dict(type='list', elements='dict'),
})
if not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
url = module.params['url']
text = module.params['text']
markdown = module.params['markdown']
channel = module.params['channel']
attachments = module.params['attachments']
payload = build_payload_for_bearychat(
module, text, markdown, channel, attachments)
do_notify_bearychat(module, url, payload)
module.exit_json(msg="OK")
if __name__ == '__main__':
main()

View File

@ -0,0 +1,226 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
description:
- Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
component:
type: str
description:
- "The name of the component being deployed. Ex: billing"
required: true
aliases: ['name']
version:
type: str
description:
- The deployment version.
required: true
token:
type: str
description:
- API token.
required: true
state:
type: str
description:
- State of the deployment.
required: true
choices: ['started', 'finished', 'failed']
hosts:
type: str
description:
      - Name of the affected host. Can be a list.
- If not specified, it defaults to the remote system's hostname.
required: false
aliases: ['host']
env:
type: str
description:
- The environment name, typically 'production', 'staging', etc.
required: false
owner:
type: str
description:
- The person responsible for the deployment.
required: false
description:
type: str
description:
- Free text description of the deployment.
required: false
url:
type: str
description:
- Base URL of the API server.
required: false
default: "https://api.bigpanda.io"
validate_certs:
description:
- If V(false), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: true
type: bool
deployment_message:
type: str
description:
- Message about the deployment.
version_added: '0.2.0'
source_system:
type: str
description:
      - Source system used in the requests to the API.
default: ansible
# informational: requirements for nodes
requirements: [ ]
'''
EXAMPLES = '''
- name: Notify BigPanda about a deployment
community.general.bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
state: started
- name: Notify BigPanda about a deployment
community.general.bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
state: finished
# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
- name: Notify BigPanda about a deployment
community.general.bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
hosts: '{{ ansible_hostname }}'
state: started
delegate_to: localhost
register: deployment
- name: Notify BigPanda about a deployment
community.general.bigpanda:
component: '{{ deployment.component }}'
version: '{{ deployment.version }}'
token: '{{ deployment.token }}'
state: finished
delegate_to: localhost
'''
# ===========================================
# Module execution.
#
import json
import socket
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url
def main():
module = AnsibleModule(
argument_spec=dict(
component=dict(required=True, aliases=['name']),
version=dict(required=True),
token=dict(required=True, no_log=True),
state=dict(required=True, choices=['started', 'finished', 'failed']),
hosts=dict(required=False, aliases=['host']),
env=dict(required=False),
owner=dict(required=False),
description=dict(required=False),
deployment_message=dict(required=False),
source_system=dict(required=False, default='ansible'),
validate_certs=dict(default=True, type='bool'),
url=dict(required=False, default='https://api.bigpanda.io'),
),
supports_check_mode=True,
)
token = module.params['token']
state = module.params['state']
url = module.params['url']
# Build the common request body
body = dict()
for k in ('component', 'version', 'hosts'):
v = module.params[k]
if v is not None:
body[k] = v
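    # Default to the local hostname and normalize `hosts` to a list, since
    # the option accepts either a single name or a list.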
if body.get('hosts') is None:
body['hosts'] = [socket.gethostname()]
if not isinstance(body['hosts'], list):
body['hosts'] = [body['hosts']]
# Insert state-specific attributes to body
if state == 'started':
for k in ('source_system', 'env', 'owner', 'description'):
v = module.params[k]
if v is not None:
body[k] = v
request_url = url + '/data/events/deployments/start'
else:
message = module.params['deployment_message']
if message is not None:
body['errorMessage'] = message
if state == 'finished':
body['status'] = 'success'
else:
body['status'] = 'failure'
request_url = url + '/data/events/deployments/end'
# Build the deployment object we return
deployment = dict(token=token, url=url)
deployment.update(body)
if 'errorMessage' in deployment:
message = deployment.pop('errorMessage')
deployment['message'] = message
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True, **deployment)
# Send the data to bigpanda
data = json.dumps(body)
headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
try:
response, info = fetch_url(module, request_url, data=data, headers=headers)
if info['status'] == 200:
module.exit_json(changed=True, **deployment)
else:
module.fail_json(msg=json.dumps(info))
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View File

@ -0,0 +1,281 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bitbucket_access_key
short_description: Manages Bitbucket repository access keys
description:
- Manages Bitbucket repository access keys (also called deploy keys).
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
- community.general.bitbucket
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
repository:
description:
- The repository name.
type: str
required: true
workspace:
description:
- The repository owner.
- "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
key:
description:
- The SSH public key.
type: str
label:
description:
- The key label.
type: str
required: true
state:
description:
- Indicates desired state of the access key.
type: str
required: true
choices: [ absent, present ]
notes:
- Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories.
- Check mode is supported.
'''
EXAMPLES = r'''
- name: Create access key
community.general.bitbucket_access_key:
repository: 'bitbucket-repo'
workspace: bitbucket_workspace
key: '{{lookup("file", "bitbucket.pub") }}'
label: 'Bitbucket'
state: present
- name: Delete access key
community.general.bitbucket_access_key:
repository: bitbucket-repo
workspace: bitbucket_workspace
label: Bitbucket
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
error_messages = {
'required_key': '`key` is required when the `state` is `present`',
'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`',
'invalid_key': 'Invalid SSH key or key is already in use',
}
BITBUCKET_API_ENDPOINTS = {
'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
}
def get_existing_deploy_key(module, bitbucket):
"""
Search for an existing deploy key on Bitbucket
with the label specified in module param `label`
:param module: instance of the :class:`AnsibleModule`
:param bitbucket: instance of the :class:`BitbucketHelper`
:return: existing deploy key or None if not found
:rtype: dict or None
Return example::
{
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
}
"""
content = {
'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
)
}
    # Look through all response pages in search of the deploy key we need
while 'next' in content:
info, content = bitbucket.request(
api_url=content['next'],
method='GET',
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_workspace_or_repo'])
if info['status'] == 403:
module.fail_json(msg=error_messages['required_permission'])
if info['status'] != 200:
module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))
res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None)
if res is not None:
return res
return None
def create_deploy_key(module, bitbucket):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
),
method='POST',
data={
'key': module.params['key'],
'label': module.params['label'],
},
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_workspace_or_repo'])
if info['status'] == 403:
module.fail_json(msg=error_messages['required_permission'])
if info['status'] == 400:
module.fail_json(msg=error_messages['invalid_key'])
if info['status'] != 200:
module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
label=module.params['label'],
info=info,
))
def delete_deploy_key(module, bitbucket, key_id):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
key_id=key_id,
),
method='DELETE',
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_workspace_or_repo'])
if info['status'] == 403:
module.fail_json(msg=error_messages['required_permission'])
if info['status'] != 204:
module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
label=module.params['label'],
info=info,
))
def main():
argument_spec = BitbucketHelper.bitbucket_argument_spec()
argument_spec.update(
repository=dict(type='str', required=True),
workspace=dict(
type='str', required=True,
),
key=dict(type='str', no_log=False),
label=dict(type='str', required=True),
state=dict(type='str', choices=['present', 'absent'], required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=BitbucketHelper.bitbucket_required_one_of(),
required_together=BitbucketHelper.bitbucket_required_together(),
)
bitbucket = BitbucketHelper(module)
key = module.params['key']
state = module.params['state']
# Check parameters
if (key is None) and (state == 'present'):
module.fail_json(msg=error_messages['required_key'])
# Retrieve access token for authorized API requests
bitbucket.fetch_access_token()
# Retrieve existing deploy key (if any)
existing_deploy_key = get_existing_deploy_key(module, bitbucket)
changed = False
    # Create a new deploy key in case it doesn't exist
if not existing_deploy_key and (state == 'present'):
if not module.check_mode:
create_deploy_key(module, bitbucket)
changed = True
# Update deploy key if the old value does not match the new one
elif existing_deploy_key and (state == 'present'):
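        # startswith() is used instead of equality because the supplied key
        # may carry a trailing comment that is absent from the stored key.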
if not key.startswith(existing_deploy_key.get('key')):
if not module.check_mode:
# Bitbucket doesn't support update key for the same label,
# so we need to delete the old one first
delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
create_deploy_key(module, bitbucket)
changed = True
# Delete deploy key
elif existing_deploy_key and (state == 'absent'):
if not module.check_mode:
delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,207 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bitbucket_pipeline_key_pair
short_description: Manages Bitbucket pipeline SSH key pair
description:
- Manages Bitbucket pipeline SSH key pair.
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
- community.general.bitbucket
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
repository:
description:
- The repository name.
type: str
required: true
workspace:
description:
- The repository owner.
- "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
public_key:
description:
- The public key.
type: str
private_key:
description:
- The private key.
type: str
state:
description:
- Indicates desired state of the key pair.
type: str
required: true
choices: [ absent, present ]
notes:
- Check mode is supported.
'''
EXAMPLES = r'''
- name: Create or update SSH key pair
community.general.bitbucket_pipeline_key_pair:
repository: 'bitbucket-repo'
workspace: bitbucket_workspace
public_key: '{{lookup("file", "bitbucket.pub") }}'
private_key: '{{lookup("file", "bitbucket") }}'
state: present
- name: Remove SSH key pair
community.general.bitbucket_pipeline_key_pair:
repository: bitbucket-repo
workspace: bitbucket_workspace
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
error_messages = {
'invalid_params': 'Account, repository or SSH key pair was not found',
'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
}
BITBUCKET_API_ENDPOINTS = {
'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
}
def get_existing_ssh_key_pair(module, bitbucket):
"""
    Retrieves an existing SSH key pair from the repository
    specified in the module param `repository`
:param module: instance of the :class:`AnsibleModule`
:param bitbucket: instance of the :class:`BitbucketHelper`
:return: existing key pair or None if not found
:rtype: dict or None
Return example::
{
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
"type": "pipeline_ssh_key_pair"
}
"""
api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
)
info, content = bitbucket.request(
api_url=api_url,
method='GET',
)
if info['status'] == 404:
# Account, repository or SSH key pair was not found.
return None
return content
def update_ssh_key_pair(module, bitbucket):
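    # The key pair endpoint is a singleton per repository: PUT both creates
    # and replaces it, so no separate create function is needed.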
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
),
method='PUT',
data={
'private_key': module.params['private_key'],
'public_key': module.params['public_key'],
},
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_params'])
if info['status'] != 200:
        module.fail_json(msg='Failed to create or update pipeline ssh key pair: {0}'.format(info))
def delete_ssh_key_pair(module, bitbucket):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
),
method='DELETE',
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_params'])
if info['status'] != 204:
module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))
def main():
argument_spec = BitbucketHelper.bitbucket_argument_spec()
argument_spec.update(
repository=dict(type='str', required=True),
workspace=dict(type='str', required=True),
public_key=dict(type='str'),
private_key=dict(type='str', no_log=True),
state=dict(type='str', choices=['present', 'absent'], required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=BitbucketHelper.bitbucket_required_one_of(),
required_together=BitbucketHelper.bitbucket_required_together(),
)
bitbucket = BitbucketHelper(module)
state = module.params['state']
public_key = module.params['public_key']
private_key = module.params['private_key']
# Check parameters
if ((public_key is None) or (private_key is None)) and (state == 'present'):
module.fail_json(msg=error_messages['required_keys'])
# Retrieve access token for authorized API requests
bitbucket.fetch_access_token()
# Retrieve existing ssh key
key_pair = get_existing_ssh_key_pair(module, bitbucket)
changed = False
# Create or update key pair
if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
if not module.check_mode:
update_ssh_key_pair(module, bitbucket)
changed = True
# Delete key pair
elif key_pair and (state == 'absent'):
if not module.check_mode:
delete_ssh_key_pair(module, bitbucket)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,304 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bitbucket_pipeline_known_host
short_description: Manages Bitbucket pipeline known hosts
description:
- Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
- The host fingerprint will be retrieved automatically, but in case of an error, one can use O(key) field to specify it manually.
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
- community.general.bitbucket
- community.general.attributes
requirements:
- paramiko
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
repository:
description:
- The repository name.
type: str
required: true
workspace:
description:
- The repository owner.
- "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
name:
description:
- The FQDN of the known host.
type: str
required: true
key:
description:
- The public key.
type: str
state:
description:
- Indicates desired state of the record.
type: str
required: true
choices: [ absent, present ]
notes:
- Check mode is supported.
'''
EXAMPLES = r'''
- name: Create known hosts from the list
community.general.bitbucket_pipeline_known_host:
repository: 'bitbucket-repo'
workspace: bitbucket_workspace
name: '{{ item }}'
state: present
with_items:
- bitbucket.org
- example.com
- name: Remove known host
community.general.bitbucket_pipeline_known_host:
repository: bitbucket-repo
workspace: bitbucket_workspace
name: bitbucket.org
state: absent
- name: Specify public key file
community.general.bitbucket_pipeline_known_host:
repository: bitbucket-repo
workspace: bitbucket_workspace
name: bitbucket.org
key: '{{lookup("file", "bitbucket.pub") }}'
state: absent
'''
RETURN = r''' # '''
import socket
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
error_messages = {
'invalid_params': 'Account or repository was not found',
'unknown_key_type': 'Public key type is unknown',
}
BITBUCKET_API_ENDPOINTS = {
'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
}
def get_existing_known_host(module, bitbucket):
"""
Search for a host in Bitbucket pipelines known hosts
with the name specified in module param `name`
:param module: instance of the :class:`AnsibleModule`
:param bitbucket: instance of the :class:`BitbucketHelper`
:return: existing host or None if not found
:rtype: dict or None
Return example::
{
'type': 'pipeline_known_host',
'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}'
'hostname': 'bitbucket.org',
'public_key': {
'type': 'pipeline_ssh_public_key',
'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
'key_type': 'ssh-rsa',
'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
},
}
"""
content = {
'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
)
}
    # Look through all response pages in search of the hostname we need
while 'next' in content:
info, content = bitbucket.request(
api_url=content['next'],
method='GET',
)
if info['status'] == 404:
module.fail_json(msg='Invalid `repository` or `workspace`.')
if info['status'] != 200:
module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None)
if host is not None:
return host
return None
def get_host_key(module, hostname):
"""
Fetches public key for specified host
:param module: instance of the :class:`AnsibleModule`
:param hostname: host name
:return: key type and key content
:rtype: tuple
Return example::
(
'ssh-rsa',
'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
)
"""
try:
sock = socket.socket()
sock.connect((hostname, 22))
except socket.error:
module.fail_json(msg='Error opening socket to {0}'.format(hostname))
try:
trans = paramiko.transport.Transport(sock)
trans.start_client()
host_key = trans.get_remote_server_key()
except paramiko.SSHException:
module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
trans.close()
sock.close()
key_type = host_key.get_name()
key = host_key.get_base64()
return key_type, key
def create_known_host(module, bitbucket):
hostname = module.params['name']
key_param = module.params['key']
if key_param is None:
key_type, key = get_host_key(module, hostname)
elif ' ' in key_param:
key_type, key = key_param.split(' ', 1)
else:
module.fail_json(msg=error_messages['unknown_key_type'])
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
),
method='POST',
data={
'hostname': hostname,
'public_key': {
'key_type': key_type,
'key': key,
}
},
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_params'])
if info['status'] != 201:
module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
            hostname=hostname,
info=info,
))
def delete_known_host(module, bitbucket, known_host_uuid):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
known_host_uuid=known_host_uuid,
),
method='DELETE',
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_params'])
if info['status'] != 204:
module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
hostname=module.params['name'],
info=info,
))
def main():
argument_spec = BitbucketHelper.bitbucket_argument_spec()
argument_spec.update(
repository=dict(type='str', required=True),
workspace=dict(type='str', required=True),
name=dict(type='str', required=True),
key=dict(type='str', no_log=False),
state=dict(type='str', choices=['present', 'absent'], required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=BitbucketHelper.bitbucket_required_one_of(),
required_together=BitbucketHelper.bitbucket_required_together(),
)
if (module.params['key'] is None) and (not HAS_PARAMIKO):
module.fail_json(msg='`paramiko` package not found, please install it.')
bitbucket = BitbucketHelper(module)
# Retrieve access token for authorized API requests
bitbucket.fetch_access_token()
# Retrieve existing known host
existing_host = get_existing_known_host(module, bitbucket)
state = module.params['state']
changed = False
    # Create a new host in case it doesn't exist
if not existing_host and (state == 'present'):
if not module.check_mode:
create_known_host(module, bitbucket)
changed = True
# Delete host
elif existing_host and (state == 'absent'):
if not module.check_mode:
delete_known_host(module, bitbucket, existing_host['uuid'])
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,276 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bitbucket_pipeline_variable
short_description: Manages Bitbucket pipeline variables
description:
- Manages Bitbucket pipeline variables.
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
- community.general.bitbucket
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
repository:
description:
- The repository name.
type: str
required: true
workspace:
description:
- The repository owner.
- "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
name:
description:
- The pipeline variable name.
type: str
required: true
value:
description:
- The pipeline variable value.
type: str
secured:
description:
- Whether to encrypt the variable value.
type: bool
default: false
state:
description:
- Indicates desired state of the variable.
type: str
required: true
choices: [ absent, present ]
notes:
- Check mode is supported.
    - For secured values, the return parameter C(changed) is always V(true).
'''
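# Note on idempotence: the Bitbucket API never returns the value of a secured
# variable (the `value` key is absent in API responses), so the module cannot
# compare current and requested values and conservatively reports a change on
# every run for secured variables.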
EXAMPLES = r'''
- name: Create or update pipeline variables from the list
community.general.bitbucket_pipeline_variable:
repository: 'bitbucket-repo'
workspace: bitbucket_workspace
name: '{{ item.name }}'
value: '{{ item.value }}'
secured: '{{ item.secured }}'
state: present
with_items:
- { name: AWS_ACCESS_KEY, value: ABCD1234, secured: false }
- { name: AWS_SECRET, value: qwe789poi123vbn0, secured: true }
- name: Remove pipeline variable
community.general.bitbucket_pipeline_variable:
repository: bitbucket-repo
workspace: bitbucket_workspace
name: AWS_ACCESS_KEY
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule, _load_params
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
error_messages = {
'required_value': '`value` is required when the `state` is `present`',
}
BITBUCKET_API_ENDPOINTS = {
'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
}
def get_existing_pipeline_variable(module, bitbucket):
"""
Search for a pipeline variable
:param module: instance of the :class:`AnsibleModule`
:param bitbucket: instance of the :class:`BitbucketHelper`
:return: existing variable or None if not found
:rtype: dict or None
Return example::
{
'name': 'AWS_ACCESS_OBKEY_ID',
'value': 'x7HU80-a2',
'type': 'pipeline_variable',
'secured': False,
'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
}
The `value` key in dict is absent in case of secured variable.
"""
variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
)
    # Look through all of the response pages in search of the variable we need
page = 1
while True:
next_url = "%s?page=%s" % (variables_base_url, page)
info, content = bitbucket.request(
api_url=next_url,
method='GET',
)
if info['status'] == 404:
module.fail_json(msg='Invalid `repository` or `workspace`.')
if info['status'] != 200:
module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))
# We are at the end of list
if 'pagelen' in content and content['pagelen'] == 0:
return None
page += 1
var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None)
if var is not None:
var['name'] = var.pop('key')
return var
def create_pipeline_variable(module, bitbucket):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
),
method='POST',
data={
'key': module.params['name'],
'value': module.params['value'],
'secured': module.params['secured'],
},
)
if info['status'] != 201:
module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
name=module.params['name'],
info=info,
))
def update_pipeline_variable(module, bitbucket, variable_uuid):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
variable_uuid=variable_uuid,
),
method='PUT',
data={
'value': module.params['value'],
'secured': module.params['secured'],
},
)
if info['status'] != 200:
module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
name=module.params['name'],
info=info,
))
def delete_pipeline_variable(module, bitbucket, variable_uuid):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
workspace=module.params['workspace'],
repo_slug=module.params['repository'],
variable_uuid=variable_uuid,
),
method='DELETE',
)
if info['status'] != 204:
module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
name=module.params['name'],
info=info,
))
class BitBucketPipelineVariable(AnsibleModule):
def __init__(self, *args, **kwargs):
        # Peek at the raw task parameters before AnsibleModule validates them,
        # so that `value` can be marked no_log when the variable is secured.
        params = _load_params() or {}
if params.get('secured'):
kwargs['argument_spec']['value'].update({'no_log': True})
super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
def main():
argument_spec = BitbucketHelper.bitbucket_argument_spec()
argument_spec.update(
repository=dict(type='str', required=True),
workspace=dict(type='str', required=True),
name=dict(type='str', required=True),
value=dict(type='str'),
secured=dict(type='bool', default=False),
state=dict(type='str', choices=['present', 'absent'], required=True),
)
module = BitBucketPipelineVariable(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=BitbucketHelper.bitbucket_required_one_of(),
required_together=BitbucketHelper.bitbucket_required_together(),
)
bitbucket = BitbucketHelper(module)
value = module.params['value']
state = module.params['state']
secured = module.params['secured']
# Check parameters
if (value is None) and (state == 'present'):
module.fail_json(msg=error_messages['required_value'])
# Retrieve access token for authorized API requests
bitbucket.fetch_access_token()
# Retrieve existing pipeline variable (if any)
existing_variable = get_existing_pipeline_variable(module, bitbucket)
changed = False
    # Create a new variable in case it doesn't exist
if not existing_variable and (state == 'present'):
if not module.check_mode:
create_pipeline_variable(module, bitbucket)
changed = True
    # Update the variable if the secured flag changed or the old value does not
    # match the new one (secured values never match, as the API does not return them)
elif existing_variable and (state == 'present'):
if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
if not module.check_mode:
update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
changed = True
# Delete variable
elif existing_variable and (state == 'absent'):
if not module.check_mode:
delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,236 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: bower
short_description: Manage bower packages with bower
description:
- Manage bower packages with bower
author: "Michael Warkentin (@mwarkentin)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
type: str
description:
- The name of a bower package to install
offline:
description:
- Install packages from local cache, if the packages were installed before
type: bool
default: false
production:
description:
- Install with --production flag
type: bool
default: false
path:
type: path
description:
- The base path where to install the bower packages
required: true
relative_execpath:
type: path
description:
- Relative path to bower executable from install path
state:
type: str
description:
- The state of the bower package
default: present
choices: [ "present", "absent", "latest" ]
version:
type: str
description:
- The version to be installed
'''
EXAMPLES = '''
- name: Install "bootstrap" bower package.
community.general.bower:
name: bootstrap
- name: Install "bootstrap" bower package on version 3.1.1.
community.general.bower:
name: bootstrap
version: '3.1.1'
- name: Remove the "bootstrap" bower package.
community.general.bower:
name: bootstrap
state: absent
- name: Install packages based on bower.json.
community.general.bower:
path: /app/location
- name: Update packages based on bower.json to their latest version.
community.general.bower:
path: /app/location
state: latest
# install bower locally and run from there
- npm:
path: /app/location
name: bower
global: false
- community.general.bower:
path: /app/location
relative_execpath: node_modules/.bin
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
class Bower(object):
def __init__(self, module, **kwargs):
self.module = module
self.name = kwargs['name']
self.offline = kwargs['offline']
self.production = kwargs['production']
self.path = kwargs['path']
self.relative_execpath = kwargs['relative_execpath']
self.version = kwargs['version']
if kwargs['version']:
self.name_version = self.name + '#' + self.version
else:
self.name_version = self.name
def _exec(self, args, run_in_check_mode=False, check_rc=True):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = []
if self.relative_execpath:
cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
if not os.path.isfile(cmd[-1]):
self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
else:
cmd.append("bower")
cmd.extend(args)
cmd.extend(['--config.interactive=false', '--allow-root'])
if self.name:
cmd.append(self.name_version)
if self.offline:
cmd.append('--offline')
if self.production:
cmd.append('--production')
# If path is specified, cd into that path and run the command.
cwd = None
if self.path:
if not os.path.exists(self.path):
os.makedirs(self.path)
if not os.path.isdir(self.path):
self.module.fail_json(msg="path %s is not a directory" % self.path)
cwd = self.path
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
return out
return ''
def list(self):
cmd = ['list', '--json']
installed = list()
missing = list()
outdated = list()
data = json.loads(self._exec(cmd, True, False))
if 'dependencies' in data:
for dep in data['dependencies']:
dep_data = data['dependencies'][dep]
if dep_data.get('missing', False):
missing.append(dep)
elif ('version' in dep_data['pkgMeta'] and
'update' in dep_data and
dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
outdated.append(dep)
elif dep_data.get('incompatible', False):
outdated.append(dep)
else:
installed.append(dep)
# Named dependency not installed
else:
missing.append(self.name)
return installed, missing, outdated
def install(self):
return self._exec(['install'])
def update(self):
return self._exec(['update'])
def uninstall(self):
return self._exec(['uninstall'])
def main():
arg_spec = dict(
name=dict(default=None),
offline=dict(default=False, type='bool'),
production=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
relative_execpath=dict(default=None, required=False, type='path'),
state=dict(default='present', choices=['present', 'absent', 'latest', ]),
version=dict(default=None),
)
module = AnsibleModule(
argument_spec=arg_spec
)
name = module.params['name']
offline = module.params['offline']
production = module.params['production']
path = module.params['path']
relative_execpath = module.params['relative_execpath']
state = module.params['state']
version = module.params['version']
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
changed = False
if state == 'present':
installed, missing, outdated = bower.list()
if missing:
changed = True
bower.install()
elif state == 'latest':
installed, missing, outdated = bower.list()
if missing or outdated:
changed = True
bower.update()
else: # Absent
installed, missing, outdated = bower.list()
if name in installed:
changed = True
bower.uninstall()
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,109 @@
#!/usr/bin/python
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: btrfs_info
short_description: Query btrfs filesystem info
version_added: "6.6.0"
description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.
author:
- Gregory Furlong (@gnfzdz)
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
'''
EXAMPLES = r'''
- name: Query information about mounted btrfs filesystems
community.general.btrfs_info:
register: my_btrfs_info
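# A hedged follow-up example (assumes the registered variable above):
- name: Show the UUID of each discovered filesystem
  ansible.builtin.debug:
    msg: "{{ item.uuid }}"
  loop: "{{ my_btrfs_info.filesystems }}"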
'''
RETURN = r'''
filesystems:
description: Summaries of the current state for all btrfs filesystems found on the target host.
type: list
elements: dict
returned: success
contains:
uuid:
description: A unique identifier assigned to the filesystem.
type: str
sample: 96c9c605-1454-49b8-a63a-15e2584c208e
label:
description: An optional label assigned to the filesystem.
type: str
sample: Tank
devices:
description: A list of devices assigned to the filesystem.
type: list
sample:
- /dev/sda1
- /dev/sdb1
default_subvolume:
description: The id of the filesystem's default subvolume.
type: int
sample: 5
subvolumes:
description: A list of dicts containing metadata for all of the filesystem's subvolumes.
type: list
elements: dict
contains:
id:
description: An identifier assigned to the subvolume, unique within the containing filesystem.
type: int
sample: 256
mountpoints:
description: Paths where the subvolume is mounted on the targeted host.
type: list
sample: ['/home']
parent:
description: The identifier of this subvolume's parent.
type: int
sample: 5
path:
                    description: The full path of the subvolume relative to the btrfs filesystem's root.
type: str
sample: /@home
'''
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
from ansible.module_utils.basic import AnsibleModule
def run_module():
module_args = dict()
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
provider = BtrfsFilesystemsProvider(module)
filesystems = [x.get_summary() for x in provider.get_filesystems()]
result = {
"filesystems": filesystems,
}
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,679 @@
#!/usr/bin/python
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: btrfs_subvolume
short_description: Manage btrfs subvolumes
version_added: "6.6.0"
description: Creates, updates and deletes btrfs subvolumes and snapshots.
options:
automount:
description:
- Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
type: bool
default: false
default:
description:
- Make the subvolume specified by O(name) the filesystem's default subvolume.
type: bool
default: false
filesystem_device:
description:
- A block device contained within the btrfs filesystem to be targeted.
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
type: path
filesystem_label:
description:
- A descriptive label assigned to the btrfs filesystem to be targeted.
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
type: str
filesystem_uuid:
description:
- A unique identifier assigned to the btrfs filesystem to be targeted.
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
type: str
name:
description:
- Name of the subvolume/snapshot to be targeted.
required: true
type: str
recursive:
description:
            - When true, indicates that parent/child subvolumes should be created/removed as necessary
to complete the operation (for O(state=present) and O(state=absent) respectively).
type: bool
default: false
snapshot_source:
description:
- Identifies the source subvolume for the created snapshot.
- Infers that the created subvolume is a snapshot.
type: str
snapshot_conflict:
description:
- Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
- V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
- V(clobber) - If a subvolume already exists at the requested location, delete it first.
This option is not idempotent and will result in a new snapshot being generated on every execution.
- V(error) - If a subvolume already exists at the requested location, return an error.
This option is not idempotent and will result in an error on replay of the module.
type: str
choices: [ skip, clobber, error ]
default: skip
state:
description:
- Indicates the current state of the targeted subvolume.
type: str
choices: [ absent, present ]
default: present
notes:
- If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided, there is expected
to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: partial
details:
- In some scenarios it may erroneously report intermediate subvolumes being created.
              After mounting, if a directory-like file is found where the subvolume would have been created, the operation is skipped.
diff_mode:
support: none
author:
- Gregory Furlong (@gnfzdz)
'''
EXAMPLES = r'''
- name: Create a @home subvolume under the root subvolume
  community.general.btrfs_subvolume:
    name: /@home
    filesystem_device: /dev/vda2
- name: Remove the @home subvolume if it exists
  community.general.btrfs_subvolume:
    name: /@home
    state: absent
    filesystem_device: /dev/vda2
- name: Create a snapshot of the root subvolume named @
  community.general.btrfs_subvolume:
    name: /@
    snapshot_source: /
    filesystem_device: /dev/vda2
- name: Create a snapshot of the root subvolume and make it the new default subvolume
  community.general.btrfs_subvolume:
    name: /@
    snapshot_source: /
    default: true
    filesystem_device: /dev/vda2
- name: Create a snapshot of the /@ subvolume, recursively creating intermediate subvolumes as required
  community.general.btrfs_subvolume:
    name: /@snapshots/@2022_06_09
    snapshot_source: /@
    recursive: true
    filesystem_device: /dev/vda2
- name: Remove the /@snapshots/@2022_06_09 subvolume, recursively deleting child subvolumes as required
  community.general.btrfs_subvolume:
    name: /@snapshots/@2022_06_09
    state: absent
    recursive: true
    filesystem_device: /dev/vda2
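# A hedged illustration of snapshot_conflict (the snapshot name is hypothetical):
- name: Re-create a snapshot on every run, replacing any existing subvolume at the target
  community.general.btrfs_subvolume:
    name: /@snapshots/@nightly
    snapshot_source: /@
    snapshot_conflict: clobber
    filesystem_device: /dev/vda2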
'''
RETURN = r'''
filesystem:
description:
- A summary of the final state of the targeted btrfs filesystem.
type: dict
returned: success
contains:
uuid:
description: A unique identifier assigned to the filesystem.
returned: success
type: str
sample: 96c9c605-1454-49b8-a63a-15e2584c208e
label:
description: An optional label assigned to the filesystem.
returned: success
type: str
sample: Tank
devices:
description: A list of devices assigned to the filesystem.
returned: success
type: list
sample:
- /dev/sda1
- /dev/sdb1
default_subvolume:
description: The ID of the filesystem's default subvolume.
returned: success and if filesystem is mounted
type: int
sample: 5
subvolumes:
description: A list of dicts containing metadata for all of the filesystem's subvolumes.
returned: success and if filesystem is mounted
type: list
elements: dict
contains:
id:
description: An identifier assigned to the subvolume, unique within the containing filesystem.
type: int
sample: 256
mountpoints:
description: Paths where the subvolume is mounted on the targeted host.
type: list
sample: ['/home']
parent:
description: The identifier of this subvolume's parent.
type: int
sample: 5
path:
                    description: The full path of the subvolume relative to the btrfs filesystem's root.
type: str
sample: /@home
modifications:
description:
- A list where each element describes a change made to the target btrfs filesystem.
type: list
returned: Success
elements: str
target_subvolume_id:
description:
- The ID of the subvolume specified with the O(name) parameter, either pre-existing or created as part of module execution.
type: int
sample: 257
returned: Success and subvolume exists after module execution
'''
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
from ansible.module_utils.basic import AnsibleModule
import os
import tempfile
class BtrfsSubvolumeModule(object):
__BTRFS_ROOT_SUBVOLUME = '/'
__BTRFS_ROOT_SUBVOLUME_ID = 5
__BTRFS_SUBVOLUME_INODE_NUMBER = 256
__CREATE_SUBVOLUME_OPERATION = 'create'
__CREATE_SNAPSHOT_OPERATION = 'snapshot'
__DELETE_SUBVOLUME_OPERATION = 'delete'
__SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'
__UNKNOWN_SUBVOLUME_ID = '?'
def __init__(self, module):
self.module = module
self.__btrfs_api = BtrfsCommands(module)
self.__provider = BtrfsFilesystemsProvider(module)
# module parameters
name = self.module.params['name']
self.__name = normalize_subvolume_path(name) if name is not None else None
self.__state = self.module.params['state']
self.__automount = self.module.params['automount']
self.__default = self.module.params['default']
self.__filesystem_device = self.module.params['filesystem_device']
self.__filesystem_label = self.module.params['filesystem_label']
self.__filesystem_uuid = self.module.params['filesystem_uuid']
self.__recursive = self.module.params['recursive']
self.__snapshot_conflict = self.module.params['snapshot_conflict']
snapshot_source = self.module.params['snapshot_source']
self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None
# execution state
self.__filesystem = None
self.__required_mounts = []
self.__unit_of_work = []
self.__completed_work = []
self.__temporary_mounts = dict()
def run(self):
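        # Any failure is captured rather than raised so that temporary mounts
        # are always cleaned up and partial results can accompany fail_json.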
error = None
try:
self.__load_filesystem()
self.__prepare_unit_of_work()
if not self.module.check_mode:
# check required mounts & mount
if len(self.__unit_of_work) > 0:
self.__execute_unit_of_work()
self.__filesystem.refresh()
else:
# check required mounts
self.__completed_work.extend(self.__unit_of_work)
except Exception as e:
error = e
finally:
self.__cleanup_mounts()
if self.__filesystem is not None:
self.__filesystem.refresh_mountpoints()
return (error, self.get_results())
# Identify the targeted filesystem and obtain the current state
def __load_filesystem(self):
if self.__has_filesystem_criteria():
filesystem = self.__find_matching_filesytem()
else:
filesystem = self.__find_default_filesystem()
# The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
if not filesystem.is_mounted():
if not self.__automount:
raise BtrfsModuleException(
"Target filesystem uuid=%s is not currently mounted and automount=False."
"Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
elif self.module.check_mode:
# TODO is failing the module an appropriate outcome in this scenario?
raise BtrfsModuleException(
"Target filesystem uuid=%s is not currently mounted. Unable to validate the current"
"state while running with check_mode=True" % filesystem.uuid)
else:
self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
filesystem.refresh()
self.__filesystem = filesystem
def __has_filesystem_criteria(self):
return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None
def __find_matching_filesytem(self):
criteria = {
'uuid': self.__filesystem_uuid,
'label': self.__filesystem_label,
'device': self.__filesystem_device,
}
return self.__provider.get_matching_filesystem(criteria)
def __find_default_filesystem(self):
filesystems = self.__provider.get_filesystems()
filesystem = None
if len(filesystems) == 1:
filesystem = filesystems[0]
else:
mounted_filesystems = [x for x in filesystems if x.is_mounted()]
if len(mounted_filesystems) == 1:
filesystem = mounted_filesystems[0]
if filesystem is not None:
return filesystem
else:
raise BtrfsModuleException(
"Failed to automatically identify targeted filesystem. "
"No explicit device indicated and found %d available filesystems." % len(filesystems)
)
# Prepare unit of work
def __prepare_unit_of_work(self):
if self.__state == "present":
if self.__snapshot_source is None:
self.__prepare_subvolume_present()
else:
self.__prepare_snapshot_present()
if self.__default:
self.__prepare_set_default()
elif self.__state == "absent":
self.__prepare_subvolume_absent()
def __prepare_subvolume_present(self):
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
if subvolume is None:
self.__prepare_before_create_subvolume(self.__name)
self.__stage_create_subvolume(self.__name)
def __prepare_before_create_subvolume(self, subvolume_name):
closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
self.__stage_required_mount(closest_parent)
if self.__recursive:
self.__prepare_create_intermediates(closest_parent, subvolume_name)
def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
relative_path = closest_subvolume.get_child_relative_path(self.__name)
missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
if len(missing_subvolumes) > 1:
current = closest_subvolume.path
for s in missing_subvolumes[:-1]:
separator = os.path.sep if current[-1] != os.path.sep else ""
current = current + separator + s
self.__stage_create_subvolume(current, True)
def __prepare_snapshot_present(self):
source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
subvolume_exists = subvolume is not None
if subvolume_exists:
if self.__snapshot_conflict == "skip":
# No change required
return
elif self.__snapshot_conflict == "error":
raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)
if source_subvolume is None:
raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
elif subvolume is not None and source_subvolume.id == subvolume.id:
raise BtrfsModuleException("Snapshot source and target are the same.")
else:
self.__stage_required_mount(source_subvolume)
if subvolume_exists and self.__snapshot_conflict == "clobber":
self.__prepare_delete_subvolume_tree(subvolume)
elif not subvolume_exists:
self.__prepare_before_create_subvolume(self.__name)
self.__stage_create_snapshot(source_subvolume, self.__name)
def __prepare_subvolume_absent(self):
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
if subvolume is not None:
self.__prepare_delete_subvolume_tree(subvolume)
def __prepare_delete_subvolume_tree(self, subvolume):
if subvolume.is_filesystem_root():
raise BtrfsModuleException("Can not delete the filesystem's root subvolume")
if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False."
"Either explicitly delete the child subvolumes first or pass "
"parameter recursive=True." % subvolume.path)
self.__stage_required_mount(subvolume.get_parent_subvolume())
queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
# prepare unit of work
for s in queue:
if s.is_mounted():
# TODO potentially unmount the subvolume if automount=True ?
raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path)
if s.is_filesystem_default():
self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
self.__stage_delete_subvolume(s)
def __prepare_recursive_delete_order(self, subvolume):
"""Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors"""
pending = [subvolume]
ordered = []
while len(pending) > 0:
next = pending.pop()
ordered.append(next)
pending.extend(next.get_child_subvolumes())
ordered.reverse() # reverse to ensure children are deleted before their parent
return ordered
def __prepare_set_default(self):
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
subvolume_id = subvolume.id if subvolume is not None else None
if self.__filesystem.default_subvolid != subvolume_id:
self.__stage_set_default_subvolume(self.__name, subvolume_id)
# Stage operations to the unit of work
def __stage_required_mount(self, subvolume):
if subvolume.get_mounted_path() is None:
if self.__automount:
self.__required_mounts.append(subvolume)
else:
raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)
def __stage_create_subvolume(self, subvolume_path, intermediate=False):
"""
        Add creation of a subvolume to the unit of work.
        If intermediate is true, the action is skipped when a directory-like file
        is found at the target after mounting a parent subvolume.
"""
self.__unit_of_work.append({
'action': self.__CREATE_SUBVOLUME_OPERATION,
'target': subvolume_path,
'intermediate': intermediate,
})
def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
"""Add creation of a snapshot from source to target to the unit of work"""
self.__unit_of_work.append({
'action': self.__CREATE_SNAPSHOT_OPERATION,
'source': source_subvolume.path,
'source_id': source_subvolume.id,
'target': target_subvolume_path,
})
def __stage_delete_subvolume(self, subvolume):
"""Add deletion of the target subvolume to the unit of work"""
self.__unit_of_work.append({
'action': self.__DELETE_SUBVOLUME_OPERATION,
'target': subvolume.path,
'target_id': subvolume.id,
})
def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
"""Add update of the filesystem's default subvolume to the unit of work"""
self.__unit_of_work.append({
'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
'target': subvolume_path,
'target_id': subvolume_id,
})
# Execute the unit of work
def __execute_unit_of_work(self):
self.__check_required_mounts()
for op in self.__unit_of_work:
if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
self.__execute_create_subvolume(op)
elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
self.__execute_create_snapshot(op)
elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
self.__execute_delete_subvolume(op)
elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
self.__execute_set_default_subvolume(op)
else:
raise ValueError("Unknown operation type '%s'" % op['action'])
def __execute_create_subvolume(self, operation):
target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
if not self.__is_existing_directory_like(target_mounted_path):
self.__btrfs_api.subvolume_create(target_mounted_path)
self.__completed_work.append(operation)
def __execute_create_snapshot(self, operation):
source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
source_mounted_path = source_subvolume.get_mounted_path()
target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
self.__completed_work.append(operation)
def __execute_delete_subvolume(self, operation):
target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
self.__btrfs_api.subvolume_delete(target_mounted_path)
self.__completed_work.append(operation)
def __execute_set_default_subvolume(self, operation):
target = operation['target']
target_id = operation['target_id']
if target_id is None:
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
if target_subvolume is None:
self.__filesystem.refresh() # the target may have been created earlier in module execution
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
if target_subvolume is None:
raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
else:
target_id = target_subvolume.id
self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
self.__completed_work.append(operation)
def __is_existing_directory_like(self, path):
return os.path.exists(path) and (
os.path.isdir(path) or
os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
)
def __check_required_mounts(self):
filtered = self.__filter_child_subvolumes(self.__required_mounts)
if len(filtered) > 0:
for subvolume in filtered:
self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
self.__filesystem.refresh_mountpoints()
def __filter_child_subvolumes(self, subvolumes):
"""Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
filtered = []
last = None
ordered = sorted(subvolumes, key=lambda x: x.path)
for next in ordered:
if last is None or not next.path[0:len(last)] == last:
filtered.append(next)
last = next.path
return filtered
# Create/cleanup temporary mountpoints
def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
# this check should be redundant
if self.module.check_mode or not self.__automount:
raise BtrfsModuleException("Unable to temporarily mount required subvolumes"
"with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))
cache_key = "%s:%d" % (filesystem.uuid, subvolid)
# The subvolume was already mounted, so return the current path
if cache_key in self.__temporary_mounts:
return self.__temporary_mounts[cache_key]
device = filesystem.devices[0]
mountpoint = tempfile.mkdtemp(dir="/tmp")
self.__temporary_mounts[cache_key] = mountpoint
mount = self.module.get_bin_path("mount", required=True)
command = [mount, "-o", "noatime,subvolid=%d" % subvolid, device, mountpoint]
result = self.module.run_command(command, check_rc=True)
return mountpoint
def __cleanup_mounts(self):
for key in self.__temporary_mounts.keys():
self.__cleanup_mount(self.__temporary_mounts[key])
def __cleanup_mount(self, mountpoint):
umount = self.module.get_bin_path("umount", required=True)
result = self.module.run_command([umount, mountpoint])
if result[0] == 0:
rmdir = self.module.get_bin_path("rmdir", required=True)
self.module.run_command([rmdir, mountpoint])
# Format and return results
def get_results(self):
target = self.__filesystem.get_subvolume_by_name(self.__name)
return dict(
changed=len(self.__completed_work) > 0,
filesystem=self.__filesystem.get_summary(),
modifications=self.__get_formatted_modifications(),
target_subvolume_id=(target.id if target is not None else None)
)
def __get_formatted_modifications(self):
return [self.__format_operation_result(op) for op in self.__completed_work]
def __format_operation_result(self, operation):
action_type = operation['action']
if action_type == self.__CREATE_SUBVOLUME_OPERATION:
return self.__format_create_subvolume_result(operation)
elif action_type == self.__CREATE_SNAPSHOT_OPERATION:
return self.__format_create_snapshot_result(operation)
elif action_type == self.__DELETE_SUBVOLUME_OPERATION:
return self.__format_delete_subvolume_result(operation)
elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
return self.__format_set_default_subvolume_result(operation)
else:
raise ValueError("Unknown operation type '%s'" % operation['action'])
def __format_create_subvolume_result(self, operation):
target = operation['target']
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
return "Created subvolume '%s' (%s)" % (target, target_id)
def __format_create_snapshot_result(self, operation):
source = operation['source']
source_id = operation['source_id']
target = operation['target']
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id)
def __format_delete_subvolume_result(self, operation):
target = operation['target']
target_id = operation['target_id']
return "Deleted subvolume '%s' (%s)" % (target, target_id)
def __format_set_default_subvolume_result(self, operation):
target = operation['target']
if 'target_id' in operation:
target_id = operation['target_id']
else:
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
return "Updated default subvolume to '%s' (%s)" % (target, target_id)
def run_module():
module_args = dict(
automount=dict(type='bool', required=False, default=False),
default=dict(type='bool', required=False, default=False),
filesystem_device=dict(type='path', required=False),
filesystem_label=dict(type='str', required=False),
filesystem_uuid=dict(type='str', required=False),
name=dict(type='str', required=True),
recursive=dict(type='bool', default=False),
state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
snapshot_source=dict(type='str', required=False),
snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
subvolume = BtrfsSubvolumeModule(module)
error, result = subvolume.run()
if error is not None:
        module.fail_json(msg=str(error), **result)
else:
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,211 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
- Manage installation and Gem version dependencies for Ruby using the Bundler gem
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
executable:
type: str
description:
- The path to the bundler executable
state:
type: str
description:
- The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version
choices: [present, latest]
default: present
chdir:
type: path
description:
- The directory to execute the bundler commands from. This directory
needs to contain a valid Gemfile or .bundle/ directory
- If not specified, it will default to the temporary working directory
exclude_groups:
type: list
elements: str
description:
- A list of Gemfile groups to exclude during operations. This only
applies when O(state=present). Bundler considers this
a 'remembered' property for the Gemfile and will automatically exclude
groups in future operations even if O(exclude_groups) is not set
clean:
description:
- Only applies if O(state=present). If set removes any gems on the
target host that are not in the gemfile
type: bool
default: false
gemfile:
type: path
description:
- Only applies if O(state=present). The path to the gemfile to use to install gems.
- If not specified it will default to the Gemfile in current directory
local:
description:
- If set only installs gems from the cache on the target host
type: bool
default: false
deployment_mode:
description:
- Only applies if O(state=present). If set it will install gems in
./vendor/bundle instead of the default location. Requires a Gemfile.lock
file to have been created prior
type: bool
default: false
user_install:
description:
- Only applies if O(state=present). Installs gems in the local user's cache or for all users
type: bool
default: true
gem_path:
type: path
description:
- Only applies if O(state=present). Specifies the directory to
install the gems into. If O(chdir) is set then this path is relative to
O(chdir)
- If not specified the default RubyGems gem paths will be used.
binstub_directory:
type: path
description:
- Only applies if O(state=present). Specifies the directory to
install any gem bins files to. When executed the bin files will run
within the context of the Gemfile and fail if any required gem
dependencies are not installed. If O(chdir) is set then this path is
relative to O(chdir)
extra_args:
type: str
description:
- A space separated string of additional commands that can be applied to
the Bundler command. Refer to the Bundler documentation for more
information
author: "Tim Hoiberg (@thoiberg)"
'''
EXAMPLES = '''
- name: Install gems from a Gemfile in the current directory
community.general.bundler:
state: present
executable: ~/.rvm/gems/2.1.5/bin/bundle
- name: Exclude the production group from installing
community.general.bundler:
state: present
exclude_groups: production
- name: Install gems into ./vendor/bundle
community.general.bundler:
state: present
deployment_mode: true
- name: Install gems using a Gemfile in another directory
community.general.bundler:
state: present
gemfile: ../rails_project/Gemfile
- name: Update Gemfile in another directory
community.general.bundler:
state: latest
chdir: ~/rails_project
'''
from ansible.module_utils.basic import AnsibleModule
def get_bundler_executable(module):
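    # The executable option is split on spaces, presumably so a wrapper
    # invocation such as 'ruby /path/to/bundle' (hypothetical) also works.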
if module.params.get('executable'):
result = module.params.get('executable').split(' ')
else:
result = [module.get_bin_path('bundle', True)]
return result
def main():
module = AnsibleModule(
argument_spec=dict(
executable=dict(default=None, required=False),
state=dict(default='present', required=False, choices=['present', 'latest']),
chdir=dict(default=None, required=False, type='path'),
exclude_groups=dict(default=None, required=False, type='list', elements='str'),
clean=dict(default=False, required=False, type='bool'),
gemfile=dict(default=None, required=False, type='path'),
local=dict(default=False, required=False, type='bool'),
deployment_mode=dict(default=False, required=False, type='bool'),
user_install=dict(default=True, required=False, type='bool'),
gem_path=dict(default=None, required=False, type='path'),
binstub_directory=dict(default=None, required=False, type='path'),
extra_args=dict(default=None, required=False),
),
supports_check_mode=True
)
state = module.params.get('state')
chdir = module.params.get('chdir')
exclude_groups = module.params.get('exclude_groups')
clean = module.params.get('clean')
gemfile = module.params.get('gemfile')
local = module.params.get('local')
deployment_mode = module.params.get('deployment_mode')
user_install = module.params.get('user_install')
gem_path = module.params.get('gem_path')
binstub_directory = module.params.get('binstub_directory')
extra_args = module.params.get('extra_args')
cmd = get_bundler_executable(module)
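    # In check mode, `bundle check` reports whether the Gemfile's dependencies
    # are satisfied; a non-zero exit code is surfaced as changed=True below.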
if module.check_mode:
cmd.append('check')
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
if state == 'present':
cmd.append('install')
if exclude_groups:
cmd.extend(['--without', ':'.join(exclude_groups)])
if clean:
cmd.append('--clean')
if gemfile:
cmd.extend(['--gemfile', gemfile])
if local:
cmd.append('--local')
if deployment_mode:
cmd.append('--deployment')
if not user_install:
cmd.append('--system')
if gem_path:
cmd.extend(['--path', gem_path])
if binstub_directory:
cmd.extend(['--binstubs', binstub_directory])
else:
cmd.append('update')
if local:
cmd.append('--local')
if extra_args:
cmd.extend(extra_args.split(' '))
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,201 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, André Paramés <git@andreparames.com>
# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: bzr
author:
- André Paramés (@andreparames)
short_description: Deploy software (or files) from bzr branches
description:
- Manage C(bzr) branches to deploy files or software.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- SSH or HTTP protocol address of the parent branch.
aliases: [ parent ]
required: true
type: str
dest:
description:
- Absolute path of where the branch should be cloned to.
required: true
type: path
version:
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
default: head
type: str
force:
description:
- If V(true), any modified files in the working
tree will be discarded.
type: bool
default: false
executable:
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
type: str
'''
EXAMPLES = '''
- name: Checkout
community.general.bzr:
name: bzr+ssh://foosball.example.org/path/to/branch
dest: /srv/checkout
version: 22
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except Exception:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
        # filter() returns an iterator on Python 3, so build a list before
        # taking its length.
        lines = [c for c in stdout.splitlines() if not re.search('^\\?\\?.*$', c)]
        return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=false).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path', required=True),
name=dict(type='str', required=True, aliases=['parent']),
version=dict(type='str', default='head'),
force=dict(type='bool', default=False),
executable=dict(type='str'),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err = (0, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,162 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: campfire
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
subscription:
type: str
description:
- The subscription name to use.
required: true
token:
type: str
description:
- API token.
required: true
room:
type: str
description:
- Room number to which the message should be sent.
required: true
msg:
type: str
description:
- The message body.
required: true
notify:
type: str
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''
EXAMPLES = '''
- name: Send a message to Campfire
community.general.campfire:
subscription: foo
token: 12345
room: 123
msg: Task completed.
- name: Send a message to Campfire
community.general.campfire:
subscription: foo
token: 12345
room: 123
notify: loggins
msg: Task completed ... with feeling.
'''
try:
from html import escape as html_escape
except ImportError:
    # Python 2 fallback: html.escape was added in Python 3.2
import cgi
def html_escape(text, quote=True):
return cgi.escape(text, quote)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def main():
module = AnsibleModule(
argument_spec=dict(
subscription=dict(required=True),
token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
notify=dict(required=False,
choices=["56k", "bell", "bezos", "bueller",
"clowntown", "cottoneyejoe",
"crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama",
"greatjob", "greyjoy", "guarantee",
"heygirl", "horn", "horror",
"inconceivable", "live", "loggins",
"makeitso", "noooo", "nyan", "ohmy",
"ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret",
"sexyback", "story", "tada", "tmyk",
"trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah",
"yodel"]),
),
supports_check_mode=False
)
subscription = module.params["subscription"]
token = module.params["token"]
room = module.params["room"]
msg = module.params["msg"]
notify = module.params["notify"]
URI = "https://%s.campfirenow.com" % subscription
NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
MSTR = "<message><body>%s</body></message>"
AGENT = "Ansible/1.2"
# Hack to add basic auth username and password the way fetch_url expects
module.params['url_username'] = token
module.params['url_password'] = 'X'
target_url = '%s/room/%s/speak.xml' % (URI, room)
headers = {'Content-Type': 'application/xml',
'User-agent': AGENT}
# Send some audible notification if requested
if notify:
response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(notify, info['status']))
# Send the message
response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(msg, info['status']))
module.exit_json(changed=True, room=room, msg=msg, notify=notify)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,188 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Nate Coraor <nate@bx.psu.edu>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: capabilities
short_description: Manage Linux capabilities
description:
- This module manipulates files privileges using the Linux capabilities(7) system.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
path:
description:
- Specifies the path to the file to be managed.
type: str
required: true
aliases: [ key ]
capability:
description:
- Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent))
type: str
required: true
aliases: [ cap ]
state:
description:
- Whether the entry should be present or absent in the file's capabilities.
type: str
choices: [ absent, present ]
default: present
notes:
- The capabilities system will automatically transform operators and flags into the effective set,
so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
- This module does not attempt to determine the final operator and flags to compare,
so you will want to ensure that your capabilities argument matches the final capabilities.
author:
- Nate Coraor (@natefoo)
'''
EXAMPLES = r'''
- name: Set cap_sys_chroot+ep on /foo
community.general.capabilities:
path: /foo
capability: cap_sys_chroot+ep
state: present
- name: Remove cap_net_bind_service from /bar
community.general.capabilities:
path: /bar
capability: cap_net_bind_service
state: absent
'''
from ansible.module_utils.basic import AnsibleModule
OPS = ('=', '-', '+')
class CapabilitiesModule(object):
platform = 'Linux'
distribution = None
def __init__(self, module):
self.module = module
self.path = module.params['path'].strip()
self.capability = module.params['capability'].strip().lower()
self.state = module.params['state']
self.getcap_cmd = module.get_bin_path('getcap', required=True)
self.setcap_cmd = module.get_bin_path('setcap', required=True)
self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
self.run()
def run(self):
current = self.getcap(self.path)
caps = [cap[0] for cap in current]
if self.state == 'present' and self.capability_tup not in current:
# need to add capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list if it's already set (but op/flags differ)
current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
# add new cap with correct op/flags
current.append(self.capability_tup)
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
elif self.state == 'absent' and self.capability_tup[0] in caps:
# need to remove capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list and then set current list
current = filter(lambda x: x[0] != self.capability_tup[0], current)
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
self.module.exit_json(changed=False, state=self.state)
def getcap(self, path):
rval = []
cmd = "%s -v %s" % (self.getcap_cmd, path)
rc, stdout, stderr = self.module.run_command(cmd)
# If file xattrs are set but no caps are set the output will be:
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
# If the file does not exist, the stderr will be (with rc == 0...):
# '/foo (No such file or directory)'
if rc != 0 or stderr != "":
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
if ' =' in stdout:
# process output of an older version of libcap
caps = stdout.split(' =')[1].strip().split()
else:
# otherwise, we have a newer version here
# see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
caps = stdout.split()[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
# comma-separated list, so we have to parse that
if ',' in cap:
cap_group = cap.split(',')
cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
for subcap in cap_group:
rval.append((subcap, op, flags))
else:
rval.append(self._parse_cap(cap))
return rval
def setcap(self, path, caps):
caps = ' '.join([''.join(cap) for cap in caps])
cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
else:
return stdout
def _parse_cap(self, cap, op_required=True):
opind = -1
try:
i = 0
while opind == -1:
opind = cap.find(OPS[i])
i += 1
except Exception:
if op_required:
self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
else:
return (cap, None, None)
op = cap[opind]
cap, flags = cap.split(op)
return (cap, op, flags)
# ==============================================================
# main
def main():
# defining module
module = AnsibleModule(
argument_spec=dict(
path=dict(type='str', required=True, aliases=['key']),
capability=dict(type='str', required=True, aliases=['cap']),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
supports_check_mode=True,
)
CapabilitiesModule(module)
if __name__ == '__main__':
main()
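For reference, the operator search in _parse_cap above can be written without the exception-driven loop; this is an equivalent sketch, not the module's code:

OPS = ('=', '-', '+')

def parse_cap(cap, op_required=True):
    # Index of the first character that is one of the recognized operators.
    opind = next((i for i, ch in enumerate(cap) if ch in OPS), -1)
    if opind == -1:
        if op_required:
            raise ValueError("couldn't find operator (one of: %s)" % str(OPS))
        return (cap, None, None)
    return (cap[:opind], cap[opind], cap[opind + 1:])

print(parse_cap("cap_sys_chroot+ep"))               # ('cap_sys_chroot', '+', 'ep')
print(parse_cap("cap_net_raw", op_required=False))  # ('cap_net_raw', None, None)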

View File

@ -0,0 +1,293 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Radek Sprta <mail@radeksprta.eu>
# Copyright (c) 2024 Colin Nolan <cn580@alumni.york.ac.uk>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: cargo
short_description: Manage Rust packages with cargo
version_added: 4.3.0
description:
- Manage Rust packages with cargo.
author: "Radek Sprta (@radek-sprta)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
executable:
description:
- Path to the C(cargo) installed in the system.
- If not specified, the module will look for C(cargo) in E(PATH).
type: path
version_added: 7.5.0
name:
description:
- The name of a Rust package to install.
type: list
elements: str
required: true
path:
description:
- >
The base path where to install the Rust packages. Cargo automatically appends
V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).
type: path
version:
description:
- >
The version to install. If O(name) contains multiple values, the module will
try to install all of them in this version.
type: str
required: false
locked:
description:
- Install with locked dependencies.
- This is only used when installing packages.
required: false
type: bool
default: false
version_added: 7.5.0
state:
description:
- The state of the Rust package.
required: false
type: str
default: present
choices: [ "present", "absent", "latest" ]
directory:
description:
- Path to the source directory to install the Rust package from.
- This is only used when installing packages.
type: path
required: false
version_added: 9.1.0
requirements:
- cargo installed
"""
EXAMPLES = r"""
- name: Install "ludusavi" Rust package
community.general.cargo:
name: ludusavi
- name: Install "ludusavi" Rust package with locked dependencies
community.general.cargo:
name: ludusavi
locked: true
- name: Install "ludusavi" Rust package in version 0.10.0
community.general.cargo:
name: ludusavi
version: '0.10.0'
- name: Install "ludusavi" Rust package to global location
community.general.cargo:
name: ludusavi
path: /usr/local
- name: Remove "ludusavi" Rust package
community.general.cargo:
name: ludusavi
state: absent
- name: Update "ludusavi" Rust package to its latest version
community.general.cargo:
name: ludusavi
state: latest
- name: Install "ludusavi" Rust package from source directory
community.general.cargo:
name: ludusavi
directory: /path/to/ludusavi/source
"""
import json
import os
import re
from ansible.module_utils.basic import AnsibleModule
class Cargo(object):
def __init__(self, module, **kwargs):
self.module = module
self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
self.name = kwargs["name"]
self.path = kwargs["path"]
self.state = kwargs["state"]
self.version = kwargs["version"]
self.locked = kwargs["locked"]
self.directory = kwargs["directory"]
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if path is not None and not os.path.isdir(path):
self.module.fail_json(msg="Path %s is not a directory" % path)
self._path = path
def _exec(
self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = self.executable + args
rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
return out, err
return "", ""
def get_installed(self):
cmd = ["install", "--list"]
if self.path:
cmd.append("--root")
cmd.append(self.path)
data, dummy = self._exec(cmd, True, False, False)
package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$")
installed = {}
for line in data.splitlines():
package_info = package_regex.match(line)
if package_info:
installed[package_info.group(1)] = package_info.group(2)
return installed
def install(self, packages=None):
cmd = ["install"]
cmd.extend(packages or self.name)
if self.locked:
cmd.append("--locked")
if self.path:
cmd.append("--root")
cmd.append(self.path)
if self.version:
cmd.append("--version")
cmd.append(self.version)
if self.directory:
cmd.append("--path")
cmd.append(self.directory)
return self._exec(cmd)
def is_outdated(self, name):
installed_version = self.get_installed().get(name)
latest_version = (
self.get_latest_published_version(name)
if not self.directory
else self.get_source_directory_version(name)
)
return installed_version != latest_version
def get_latest_published_version(self, name):
cmd = ["search", name, "--limit", "1"]
data, dummy = self._exec(cmd, True, False, False)
match = re.search(r'"(.+)"', data)
if not match:
self.module.fail_json(
msg="No published version for package %s found" % name
)
return match.group(1)
def get_source_directory_version(self, name):
cmd = [
"metadata",
"--format-version",
"1",
"--no-deps",
"--manifest-path",
os.path.join(self.directory, "Cargo.toml"),
]
data, dummy = self._exec(cmd, True, False, False)
manifest = json.loads(data)
package = next(
(package for package in manifest["packages"] if package["name"] == name),
None,
)
if not package:
self.module.fail_json(
msg="Package %s not defined in source, found: %s"
% (name, [x["name"] for x in manifest["packages"]])
)
return package["version"]
def uninstall(self, packages=None):
cmd = ["uninstall"]
cmd.extend(packages or self.name)
return self._exec(cmd)
def main():
arg_spec = dict(
executable=dict(default=None, type="path"),
name=dict(required=True, type="list", elements="str"),
path=dict(default=None, type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
version=dict(default=None, type="str"),
locked=dict(default=False, type="bool"),
directory=dict(default=None, type="path"),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params["name"]
state = module.params["state"]
version = module.params["version"]
directory = module.params["directory"]
if not name:
module.fail_json(msg="Package name must be specified")
if directory is not None and not os.path.isdir(directory):
module.fail_json(msg="Source directory does not exist")
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(
LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
)
cargo = Cargo(module, **module.params)
changed, out, err = False, None, None
installed_packages = cargo.get_installed()
if state == "present":
to_install = [
n
for n in name
if (n not in installed_packages)
or (version and version != installed_packages[n])
]
if to_install:
changed = True
out, err = cargo.install(to_install)
elif state == "latest":
to_update = [
n for n in name if n not in installed_packages or cargo.is_outdated(n)
]
if to_update:
changed = True
out, err = cargo.install(to_update)
else: # absent
to_uninstall = [n for n in name if n in installed_packages]
if to_uninstall:
changed = True
out, err = cargo.uninstall(to_uninstall)
module.exit_json(changed=changed, stdout=out, stderr=err)
if __name__ == "__main__":
main()
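A small sketch of what get_installed() extracts from `cargo install --list`; the sample output below is illustrative, not captured from a real run:

import re

sample = (
    "ludusavi v0.10.0:\n"
    "    ludusavi\n"
    "ripgrep v14.1.0:\n"
    "    rg\n"
)
package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$")
installed = {}
for line in sample.splitlines():
    match = package_regex.match(line)
    if match:  # header lines match; indented binary names do not
        installed[match.group(1)] = match.group(2)
print(installed)  # {'ludusavi': '0.10.0', 'ripgrep': '14.1.0'}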

View File

@ -0,0 +1,162 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Jonathan Mainguy <jon@soh.re>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# basis of code taken from the ansible twillio and nexmo modules
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: catapult
short_description: Send an SMS or MMS using the Catapult Bandwidth API
description:
- Allows notifications to be sent as SMS or MMS via the Catapult Bandwidth API.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
src:
type: str
description:
- One of your catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)).
required: true
dest:
type: list
elements: str
description:
- The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)).
required: true
msg:
type: str
description:
- The contents of the text message (must be 2048 characters or less).
required: true
media:
type: str
description:
- For MMS messages, a media URL to the location of the media to be sent with the message.
user_id:
type: str
description:
- User ID from the API account page.
required: true
api_token:
type: str
description:
- API token from the API account page.
required: true
api_secret:
type: str
description:
- API secret from the API account page.
required: true
author: "Jonathan Mainguy (@Jmainguy)"
notes:
- Will return changed even if the media URL is wrong.
- Will return changed if the destination number is invalid.
'''
EXAMPLES = '''
- name: Send a mms to multiple users
community.general.catapult:
src: "+15035555555"
dest:
- "+12525089000"
- "+12018994225"
media: "http://example.com/foobar.jpg"
msg: "Task is complete"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
- name: Send a sms to a single user
community.general.catapult:
src: "+15035555555"
dest: "+12018994225"
msg: "Consider yourself notified"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
'''
RETURN = '''
changed:
description: Whether the API accepted the message.
returned: always
type: bool
sample: true
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def send(module, src, dest, msg, media, user_id, api_token, api_secret):
"""
Send the message
"""
AGENT = "Ansible"
URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
data = {'from': src, 'to': dest, 'text': msg}
if media:
data['media'] = media
headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = api_token.replace('\n', '')
module.params['url_password'] = api_secret.replace('\n', '')
return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(required=True),
dest=dict(required=True, type='list', elements='str'),
msg=dict(required=True),
user_id=dict(required=True),
api_token=dict(required=True, no_log=True),
api_secret=dict(required=True, no_log=True),
media=dict(default=None, required=False),
),
)
src = module.params['src']
dest = module.params['dest']
msg = module.params['msg']
media = module.params['media']
user_id = module.params['user_id']
api_token = module.params['api_token']
api_secret = module.params['api_secret']
for number in dest:
rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
if info["status"] != 201:
body = json.loads(info["body"])
fail_msg = body["message"]
module.fail_json(msg=fail_msg)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
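A standalone sketch of the request send() issues above, using only the standard library; all credentials and numbers are placeholders. The module passes api_token and api_secret as the basic-auth username and password:

import base64
import json
import urllib.request

user_id, api_token, api_secret = "USER_ID", "TOKEN", "SECRET"  # placeholders
url = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
payload = {"from": "+15035555555", "to": "+12018994225", "text": "hello"}

request = urllib.request.Request(url, data=json.dumps(payload).encode("utf-8"),
                                 method="POST")
request.add_header("Content-Type", "application/json")
credentials = base64.b64encode(("%s:%s" % (api_token, api_secret)).encode()).decode()
request.add_header("Authorization", "Basic %s" % credentials)
# urllib.request.urlopen(request)  # the API answers 201 when a message is accepted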

View File

@ -0,0 +1,243 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015, Epic Games, Inc.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: circonus_annotation
short_description: Create an annotation in circonus
description:
- Create an annotation event with a given category, title, and description. Optionally, a start time, end time, or duration can be provided.
author: "Nick Harring (@NickatEpic)"
requirements:
- requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
notes:
- Check mode isn't supported.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
api_key:
type: str
description:
- Circonus API key
required: true
category:
type: str
description:
- Annotation Category
required: true
description:
type: str
description:
- Description of annotation
required: true
title:
type: str
description:
- Title of annotation
required: true
start:
type: int
description:
- Unix timestamp of event start
- If not specified, it defaults to "now".
stop:
type: int
description:
- Unix timestamp of event end
- If not specified, it defaults to "now" + O(duration).
duration:
type: int
description:
- Duration in seconds of annotation
default: 0
'''
EXAMPLES = '''
- name: Create a simple annotation event with a source, defaults to start and end time of now
community.general.circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
- name: Create an annotation with a duration of 5 minutes and a default start time of now
community.general.circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
duration: 300
- name: Create an annotation with explicit start and stop times
community.general.circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
start: 1395940006
stop: 1395954407
'''
RETURN = '''
annotation:
description: details about the created annotation
returned: success
type: complex
contains:
_cid:
description: annotation identifier
returned: success
type: str
sample: /annotation/100000
_created:
description: creation timestamp
returned: success
type: int
sample: 1502236928
_last_modified:
description: last modification timestamp
returned: success
type: int
sample: 1502236928
_last_modified_by:
description: last modified by
returned: success
type: str
sample: /user/1000
category:
description: category of the created annotation
returned: success
type: str
sample: alerts
title:
description: title of the created annotation
returned: success
type: str
sample: WARNING
description:
description: description of the created annotation
returned: success
type: str
sample: Host is down.
start:
description: Unix timestamp at which the annotation starts
returned: success
type: int
sample: 1502236928
stop:
description: Unix timestamp at which the annotation ends
returned: success
type: int
sample: 1502236928
rel_metrics:
description: Array of metrics related to this annotation; each metric is a string.
returned: success
type: list
sample:
- 54321_kbps
'''
import json
import time
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import PY3
from ansible.module_utils.common.text.converters import to_native
def check_requests_dep(module):
"""Check if an adequate requests version is available"""
if not HAS_REQUESTS:
module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
else:
required_version = '2.0.0' if PY3 else '1.0.0'
if LooseVersion(requests.__version__) < LooseVersion(required_version):
module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
def post_annotation(annotation, api_key):
''' Takes annotation dict and api_key string'''
base_url = 'https://api.circonus.com/v2'
annotate_post_endpoint = '/annotation'
resp = requests.post(base_url + annotate_post_endpoint,
headers=build_headers(api_key), data=json.dumps(annotation))
resp.raise_for_status()
return resp
def create_annotation(module):
''' Takes ansible module object '''
annotation = {}
duration = module.params['duration']
if module.params['start'] is not None:
start = module.params['start']
else:
start = int(time.time())
if module.params['stop'] is not None:
stop = module.params['stop']
else:
stop = int(time.time()) + duration
annotation['start'] = start
annotation['stop'] = stop
annotation['category'] = module.params['category']
annotation['description'] = module.params['description']
annotation['title'] = module.params['title']
return annotation
def build_headers(api_token):
'''Takes api token, returns headers with it included.'''
headers = {'X-Circonus-App-Name': 'ansible',
'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
'Accept': 'application/json'}
return headers
def main():
'''Main function, dispatches logic'''
module = AnsibleModule(
argument_spec=dict(
start=dict(type='int'),
stop=dict(type='int'),
category=dict(required=True),
title=dict(required=True),
description=dict(required=True),
duration=dict(default=0, type='int'),
api_key=dict(required=True, no_log=True)
)
)
check_requests_dep(module)
annotation = create_annotation(module)
try:
resp = post_annotation(annotation, module.params['api_key'])
except requests.exceptions.RequestException as e:
module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=True, annotation=resp.json())
if __name__ == '__main__':
main()
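A sketch of the annotation document and headers assembled by create_annotation() and build_headers(); the API key and field values are placeholders:

import json
import time

api_key = "XXXXXXXXXXXXXXXXX"  # placeholder
now = int(time.time())
annotation = {
    "start": now,            # defaults to "now" when start is not given
    "stop": now + 300,       # defaults to start + duration
    "category": "deploys",
    "title": "App Config Change",
    "description": "Detailed description of the change",
}
headers = {
    "X-Circonus-App-Name": "ansible",
    "Host": "api.circonus.com",
    "X-Circonus-Auth-Token": api_key,
    "Accept": "application/json",
}
body = json.dumps(annotation)
# requests.post("https://api.circonus.com/v2/annotation", headers=headers, data=body)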

View File

@ -0,0 +1,197 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: cisco_webex
short_description: Send a message to a Cisco Webex Teams Room or Individual
description:
- Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
author: Drew Rusell (@drew-russell)
notes:
- The O(recipient_type) must be valid for the supplied O(recipient_id).
- Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
recipient_type:
description:
- The request parameter you would like to send the message to.
- Messages can be sent to either a room or an individual (by ID or E-Mail).
required: true
choices: ['roomId', 'toPersonEmail', 'toPersonId']
type: str
recipient_id:
description:
- The unique identifier associated with the supplied O(recipient_type).
required: true
type: str
msg_type:
description:
- Specifies how you would like the message formatted.
default: text
choices: ['text', 'markdown']
type: str
aliases: ['message_type']
personal_token:
description:
- Your personal access token required to validate the Webex Teams API.
required: true
aliases: ['token']
type: str
msg:
description:
- The message you would like to send.
required: true
type: str
'''
EXAMPLES = """
# Note: The following examples assume a variable file has been imported
# that contains the appropriate information.
- name: Cisco Webex Teams - Markdown Message to a Room
community.general.cisco_webex:
recipient_type: roomId
recipient_id: "{{ room_id }}"
msg_type: markdown
personal_token: "{{ token }}"
msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
- name: Cisco Webex Teams - Text Message to a Room
community.general.cisco_webex:
recipient_type: roomId
recipient_id: "{{ room_id }}"
msg_type: text
personal_token: "{{ token }}"
msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
- name: Cisco Webex Teams - Text Message by an Individual's ID
community.general.cisco_webex:
recipient_type: toPersonId
recipient_id: "{{ person_id }}"
msg_type: text
personal_token: "{{ token }}"
msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
community.general.cisco_webex:
recipient_type: toPersonEmail
recipient_id: "{{ person_email }}"
msg_type: text
personal_token: "{{ token }}"
msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
"""
RETURN = """
status_code:
description:
- The Response Code returned by the Webex Teams API.
- Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
returned: always
type: int
sample: 200
message:
description:
- The Response Message returned by the Webex Teams API.
- Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
returned: always
type: str
sample: OK (585 bytes)
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def webex_msg(module):
"""When check mode is specified, establish a read only connection, that does not return any user specific
data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
# Ansible Specific Variables
results = {}
ansible = module.params
headers = {
'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
'content-type': 'application/json'
}
if module.check_mode:
url = "https://webexapis.com/v1/people/me"
payload = None
else:
url = "https://webexapis.com/v1/messages"
payload = {
ansible['recipient_type']: ansible['recipient_id'],
ansible['msg_type']: ansible['msg']
}
payload = module.jsonify(payload)
response, info = fetch_url(module, url, data=payload, headers=headers)
status_code = info['status']
msg = info['msg']
# Module will fail if the response is not 200
if status_code != 200:
results['failed'] = True
results['status_code'] = status_code
results['message'] = msg
else:
results['failed'] = False
results['status_code'] = status_code
if module.check_mode:
results['message'] = 'Authentication Successful.'
else:
results['message'] = msg
return results
def main():
'''Ansible main. '''
module = AnsibleModule(
argument_spec=dict(
recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
recipient_id=dict(required=True, no_log=True),
msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
personal_token=dict(required=True, no_log=True, aliases=['token']),
msg=dict(required=True),
),
supports_check_mode=True
)
results = webex_msg(module)
module.exit_json(**results)
if __name__ == "__main__":
main()
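A sketch of how webex_msg() keys the payload by recipient_type and msg_type; the identifiers below are placeholders:

import json

params = {
    "recipient_type": "roomId",   # or toPersonEmail / toPersonId
    "recipient_id": "ROOM_ID",    # placeholder
    "msg_type": "markdown",       # or text
    "msg": "**build complete**",
}
payload = {
    params["recipient_type"]: params["recipient_id"],
    params["msg_type"]: params["msg"],
}
print(json.dumps(payload))  # {"roomId": "ROOM_ID", "markdown": "**build complete**"}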

View File

@ -0,0 +1,353 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_aa_policy
short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud
description:
- An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- The name of the Anti Affinity Policy.
type: str
required: true
location:
description:
- Datacenter in which the policy lives/should live.
type: str
required: true
state:
description:
- Whether to create or delete the policy.
type: str
required: false
default: present
choices: ['present','absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
---
- name: Create AA Policy
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Create an Anti Affinity Policy
community.general.clc_aa_policy:
name: Hammer Time
location: UK3
state: present
register: policy
- name: Debug
ansible.builtin.debug:
var: policy
- name: Delete AA Policy
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Delete an Anti Affinity Policy
community.general.clc_aa_policy:
name: Hammer Time
location: UK3
state: absent
register: policy
- name: Debug
ansible.builtin.debug:
var: policy
'''
RETURN = '''
policy:
description: The anti affinity policy information
returned: success
type: dict
sample:
{
"id":"1a28dd0988984d87b9cd61fa8da15424",
"name":"test_aa_policy",
"location":"UC1",
"links":[
{
"rel":"self",
"href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
"verbs":[
"GET",
"DELETE",
"PUT"
]
},
{
"rel":"location",
"href":"/v2/datacenters/wfad/UC1",
"id":"uc1",
"name":"UC1 - US West (Santa Clara)"
}
]
}
'''
__version__ = '${version}'
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk:
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcAntiAffinityPolicy:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
self.policy_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'),
exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'),
exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
location=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
)
return argument_spec
# Module Behavior Goodness
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
self._set_clc_credentials_from_env()
self.policy_dict = self._get_policies_for_datacenter(p)
if p['state'] == "absent":
changed, policy = self._ensure_policy_is_absent(p)
else:
changed, policy = self._ensure_policy_is_present(p)
if hasattr(policy, 'data'):
policy = policy.data
elif hasattr(policy, '__dict__'):
policy = policy.__dict__
self.module.exit_json(changed=changed, policy=policy)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_policies_for_datacenter(self, p):
"""
Get the Policies for a datacenter by calling the CLC API.
:param p: datacenter to get policies from
:return: policies in the datacenter
"""
response = {}
policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
for policy in policies:
response[policy.name] = policy
return response
def _create_policy(self, p):
"""
Create an Anti Affinity Policy using the CLC API.
:param p: datacenter to create policy in
:return: response dictionary from the CLC API.
"""
try:
return self.clc.v2.AntiAffinity.Create(
name=p['name'],
location=p['location'])
except CLCException as ex:
self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
p['name'], ex.response_text
))
def _delete_policy(self, p):
"""
Delete an Anti Affinity Policy using the CLC API.
:param p: datacenter to delete a policy from
:return: none
"""
try:
policy = self.policy_dict[p['name']]
policy.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
p['name'], ex.response_text
))
def _policy_exists(self, policy_name):
"""
Check to see if an Anti Affinity Policy exists
:param policy_name: name of the policy
:return: boolean of if the policy exists
"""
if policy_name in self.policy_dict:
return self.policy_dict.get(policy_name)
return False
def _ensure_policy_is_absent(self, p):
"""
Makes sure that a policy is absent
:param p: dictionary of policy name
:return: tuple of if a deletion occurred and the name of the policy that was deleted
"""
changed = False
if self._policy_exists(policy_name=p['name']):
changed = True
if not self.module.check_mode:
self._delete_policy(p)
return changed, None
def _ensure_policy_is_present(self, p):
"""
Ensures that a policy is present
:param p: dictionary of a policy name
:return: tuple of if an addition occurred and the name of the policy that was added
"""
changed = False
policy = self._policy_exists(policy_name=p['name'])
if not policy:
changed = True
policy = None
if not self.module.check_mode:
policy = self._create_policy(p)
return changed, policy
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
supports_check_mode=True)
clc_aa_policy = ClcAntiAffinityPolicy(module)
clc_aa_policy.process_request()
if __name__ == '__main__':
main()
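The credential precedence in _set_clc_credentials_from_env() is easy to miss in the branching; this sketch restates it as a plain function (not the module's code):

import os

def resolve_clc_credentials(env=None):
    env = os.environ if env is None else env
    token = env.get('CLC_V2_API_TOKEN')
    alias = env.get('CLC_ACCT_ALIAS')
    username = env.get('CLC_V2_API_USERNAME')
    password = env.get('CLC_V2_API_PASSWD')
    if token and alias:
        return ('token', token, alias)        # token + alias wins
    if username and password:
        return ('password', username, password)
    raise EnvironmentError('You must set the CLC_V2_API_USERNAME and '
                           'CLC_V2_API_PASSWD environment variables')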

View File

@ -0,0 +1,536 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_alert_policy
short_description: Create or Delete Alert Policies at CenturyLink Cloud
description:
- An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
alias:
description:
- The alias of your CLC Account
type: str
required: true
name:
description:
- The name of the alert policy. This is mutually exclusive with id
type: str
id:
description:
- The alert policy id. This is mutually exclusive with name
type: str
alert_recipients:
description:
- A list of recipient email ids to notify the alert.
This is required for state 'present'
type: list
elements: str
metric:
description:
- The metric on which to measure the condition that will trigger the alert.
This is required for state 'present'
type: str
choices: ['cpu','memory','disk']
duration:
description:
- The length of time in minutes that the condition must exceed the threshold.
This is required for state 'present'
type: str
threshold:
description:
- The threshold that will trigger the alert when the metric equals or exceeds it.
This is required for state 'present'
This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0
type: int
state:
description:
- Whether to create or delete the policy.
type: str
default: present
choices: ['present','absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
---
- name: Create Alert Policy Example
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Create an Alert Policy for disk above 80% for 5 minutes
community.general.clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
alert_recipients:
- test1@centurylink.com
- test2@centurylink.com
metric: 'disk'
duration: '00:05:00'
threshold: 80
state: present
register: policy
- name: Debug
ansible.builtin.debug: var=policy
- name: Delete Alert Policy Example
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Delete an Alert Policy
community.general.clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
state: absent
register: policy
- name: Debug
ansible.builtin.debug: var=policy
'''
RETURN = '''
policy:
description: The alert policy information
returned: success
type: dict
sample:
{
"actions": [
{
"action": "email",
"settings": {
"recipients": [
"user1@domain.com",
"user1@domain.com"
]
}
}
],
"id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
"links": [
{
"href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
"rel": "self",
"verbs": [
"GET",
"DELETE",
"PUT"
]
}
],
"name": "test_alert",
"triggers": [
{
"duration": "00:05:00",
"metric": "disk",
"threshold": 80.0
}
]
}
'''
__version__ = '${version}'
import json
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcAlertPolicy:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
self.policy_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(),
id=dict(),
alias=dict(required=True),
alert_recipients=dict(type='list', elements='str'),
metric=dict(
choices=[
'cpu',
'memory',
'disk']),
duration=dict(type='str'),
threshold=dict(type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
mutually_exclusive = [
['name', 'id']
]
return {'argument_spec': argument_spec,
'mutually_exclusive': mutually_exclusive}
# Module Behavior Goodness
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
self._set_clc_credentials_from_env()
self.policy_dict = self._get_alert_policies(p['alias'])
if p['state'] == 'present':
changed, policy = self._ensure_alert_policy_is_present()
else:
changed, policy = self._ensure_alert_policy_is_absent()
self.module.exit_json(changed=changed, policy=policy)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_alert_policy_is_present(self):
"""
Ensures that the alert policy is present
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the created/updated alert policy
"""
changed = False
p = self.module.params
policy_name = p.get('name')
if not policy_name:
self.module.fail_json(msg='Policy name is required')
policy = self._alert_policy_exists(policy_name)
if not policy:
changed = True
policy = None
if not self.module.check_mode:
policy = self._create_alert_policy()
else:
changed_u, policy = self._ensure_alert_policy_is_updated(policy)
if changed_u:
changed = True
return changed, policy
def _ensure_alert_policy_is_absent(self):
"""
Ensures that the alert policy is absent
:return: (changed, None)
changed: A flag representing if anything is modified
"""
changed = False
p = self.module.params
alert_policy_id = p.get('id')
alert_policy_name = p.get('name')
alias = p.get('alias')
if not alert_policy_id and not alert_policy_name:
self.module.fail_json(
msg='Either alert policy id or policy name is required')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id(
self.module,
alert_policy_name)
if alert_policy_id and alert_policy_id in self.policy_dict:
changed = True
if not self.module.check_mode:
self._delete_alert_policy(alias, alert_policy_id)
return changed, None
def _ensure_alert_policy_is_updated(self, alert_policy):
"""
Ensures the alert policy is updated if anything is changed in the alert policy configuration
:param alert_policy: the target alert policy
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the updated the alert policy
"""
changed = False
p = self.module.params
alert_policy_id = alert_policy.get('id')
email_list = p.get('alert_recipients')
metric = p.get('metric')
duration = p.get('duration')
threshold = p.get('threshold')
policy = alert_policy
if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
(duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
(threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
changed = True
elif email_list:
t_email_list = list(
alert_policy.get('actions')[0].get('settings').get('recipients'))
if set(email_list) != set(t_email_list):
changed = True
if changed and not self.module.check_mode:
policy = self._update_alert_policy(alert_policy_id)
return changed, policy
def _get_alert_policies(self, alias):
"""
Get the alert policies for account alias by calling the CLC API.
:param alias: the account alias
:return: the alert policies for the account alias
"""
response = {}
policies = self.clc.v2.API.Call('GET',
'/v2/alertPolicies/%s'
% alias)
for policy in policies.get('items'):
response[policy.get('id')] = policy
return response
def _create_alert_policy(self):
"""
Create an alert Policy using the CLC API.
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'POST',
'/v2/alertPolicies/%s' % alias,
arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to create alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _update_alert_policy(self, alert_policy_id):
"""
Update alert policy using the CLC API.
:param alert_policy_id: The clc alert policy id
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'PUT', '/v2/alertPolicies/%s/%s' %
(alias, alert_policy_id), arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to update alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _delete_alert_policy(self, alias, policy_id):
"""
Delete an alert policy using the CLC API.
:param alias : the account alias
:param policy_id: the alert policy id
:return: response dictionary from the CLC API.
"""
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/alertPolicies/%s/%s' %
(alias, policy_id), None)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to delete alert policy id "{0}". {1}'.format(
policy_id, str(e.response_text)))
return result
def _alert_policy_exists(self, policy_name):
"""
Check to see if an alert policy exists
:param policy_name: name of the alert policy
:return: boolean of if the policy exists
"""
result = False
for policy_id in self.policy_dict:
if self.policy_dict.get(policy_id).get('name') == policy_name:
result = self.policy_dict.get(policy_id)
return result
def _get_alert_policy_id(self, module, alert_policy_name):
"""
retrieves the alert policy id of the account based on the name of the policy
:param module: the AnsibleModule object
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
for policy_id in self.policy_dict:
if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy_id
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcAlertPolicy._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_alert_policy = ClcAlertPolicy(module)
clc_alert_policy.process_request()
if __name__ == '__main__':
main()
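A simplified sketch of the drift check in _ensure_alert_policy_is_updated(): the policy is considered changed when any trigger field differs or the recipient sets differ. The sample policy document is illustrative:

existing = {
    'triggers': [{'metric': 'disk', 'duration': '00:05:00', 'threshold': 80.0}],
    'actions': [{'settings': {'recipients': ['test1@centurylink.com']}}],
}
wanted = {'metric': 'disk', 'duration': '00:10:00', 'threshold': 80,
          'alert_recipients': ['test1@centurylink.com']}

trigger = existing['triggers'][0]
changed = (
    wanted['metric'] != str(trigger['metric'])
    or wanted['duration'] != str(trigger['duration'])
    or float(wanted['threshold']) != float(trigger['threshold'])
    or set(wanted['alert_recipients'])
    != set(existing['actions'][0]['settings']['recipients'])
)
print(changed)  # True, because the duration changed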

View File

@ -0,0 +1,309 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_blueprint_package
short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud
description:
- An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
server_ids:
description:
- A list of server Ids on which to deploy the blueprint package.
type: list
required: true
elements: str
package_id:
description:
- The package id of the blueprint.
type: str
required: true
package_params:
description:
- The dictionary of arguments required to deploy the blueprint.
type: dict
default: {}
required: false
state:
description:
- Whether to install or uninstall the package. Currently only "present" (install) is supported.
type: str
required: false
default: present
choices: ['present']
wait:
description:
- Whether to wait for the tasks to finish before returning.
type: str
default: 'True'
required: false
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Deploy package
community.general.clc_blueprint_package:
server_ids:
- UC1TEST-SERVER1
- UC1TEST-SERVER2
package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
package_params: {}
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SERVER1",
"UC1TEST-SERVER2"
]
'''
__version__ = '${version}'
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcBlueprintPackage:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
changed = False
changed_server_ids = []
self._set_clc_credentials_from_env()
server_ids = p['server_ids']
package_id = p['package_id']
package_params = p['package_params']
state = p['state']
if state == 'present':
changed, changed_server_ids, request_list = self.ensure_package_installed(
server_ids, package_id, package_params)
self._wait_for_requests_to_complete(request_list)
self.module.exit_json(changed=changed, server_ids=changed_server_ids)
@staticmethod
def define_argument_spec():
"""
This function defines the dictionary object required for
package module
:return: the package dictionary object
"""
argument_spec = dict(
server_ids=dict(type='list', elements='str', required=True),
package_id=dict(required=True),
package_params=dict(type='dict', default={}),
wait=dict(default=True), # @FIXME should be bool?
state=dict(default='present', choices=['present'])
)
return argument_spec
def ensure_package_installed(self, server_ids, package_id, package_params):
"""
Ensure the package is installed in the given list of servers
:param server_ids: the server list where the package needs to be installed
:param package_id: the blueprint package id
:param package_params: the package arguments
:return: (changed, server_ids, request_list)
changed: A flag indicating if a change was made
server_ids: The list of servers modified
request_list: The list of request objects from clc-sdk
"""
changed = False
request_list = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to get servers from CLC')
for server in servers:
if not self.module.check_mode:
request = self.clc_install_package(
server,
package_id,
package_params)
request_list.append(request)
changed = True
return changed, server_ids, request_list
def clc_install_package(self, server, package_id, package_params):
"""
Install the package to a given clc server
:param server: The server object where the package needs to be installed
:param package_id: The blueprint package id
:param package_params: the required argument dict for the package installation
:return: The result object from the CLC API call
"""
result = None
try:
result = server.ExecutePackage(
package_id=package_id,
parameters=package_params)
except CLCException as ex:
self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
package_id, server.id, ex.message
))
return result
def _wait_for_requests_to_complete(self, request_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param request_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in request_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process package install request')
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: the list of server ids
:param message: the error message to raise if there is any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
self.module.fail_json(msg=message + ': %s' % ex)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
Main function
:return: None
"""
module = AnsibleModule(
argument_spec=ClcBlueprintPackage.define_argument_spec(),
supports_check_mode=True
)
clc_blueprint_package = ClcBlueprintPackage(module)
clc_blueprint_package.process_request()
if __name__ == '__main__':
main()
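The polling pattern in _wait_for_requests_to_complete() can be summarized as the sketch below; the request objects are stand-ins for clc-sdk request objects exposing WaitUntilComplete(), a requests list, and Status():

def wait_for_requests(request_list, wait=True):
    if not wait:
        return
    for request in request_list:
        request.WaitUntilComplete()  # block until the CLC job settles
        for details in request.requests:
            if details.Status() != 'succeeded':
                raise RuntimeError('Unable to process package install request')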

View File

@ -0,0 +1,596 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
    - Create, update, or delete firewall policies on CenturyLink Cloud
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
location:
description:
- Target datacenter for the firewall policy
type: str
required: true
state:
description:
- Whether to create or delete the firewall policy
type: str
default: present
choices: ['present', 'absent']
source:
description:
- The list of source addresses for traffic on the originating firewall.
This is required when state is 'present'
type: list
elements: str
destination:
description:
- The list of destination addresses for traffic on the terminating firewall.
This is required when state is 'present'
type: list
elements: str
ports:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
- "Example: V(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
type: list
elements: str
firewall_policy_id:
description:
- Id of the firewall policy. This is required to update or delete an existing firewall policy
type: str
source_account_alias:
description:
- CLC alias for the source account
type: str
required: true
destination_account_alias:
description:
- CLC alias for the destination account
type: str
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
type: str
default: 'True'
enabled:
description:
- Whether the firewall policy is enabled or disabled
type: str
choices: ['True', 'False']
default: 'True'
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      Centurylink Cloud
    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
---
- name: Create Firewall Policy
hosts: localhost
gather_facts: false
connection: local
tasks:
      - name: Create / Verify a Firewall Policy at CenturyLink Cloud
        community.general.clc_firewall_policy:
source_account_alias: WFAD
location: VA1
state: present
source: 10.128.216.0/24
destination: 10.128.216.0/24
ports: Any
destination_account_alias: WFAD
- name: Delete Firewall Policy
hosts: localhost
gather_facts: false
connection: local
tasks:
      - name: Delete a Firewall Policy at CenturyLink Cloud
        community.general.clc_firewall_policy:
source_account_alias: WFAD
location: VA1
state: absent
firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
'''
RETURN = '''
firewall_policy_id:
    description: The firewall policy id
returned: success
type: str
sample: fc36f1bfd47242e488a9c44346438c05
firewall_policy:
    description: The firewall policy information
returned: success
type: dict
sample:
{
"destination":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"destinationAccount":"wfad",
"enabled":true,
"id":"fc36f1bfd47242e488a9c44346438c05",
"links":[
{
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
}
],
"ports":[
"any"
],
"source":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"status":"active"
}
'''
__version__ = '${version}'
import os
import traceback
from ansible.module_utils.six.moves.urllib.parse import urlparse
from time import sleep
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcFirewallPolicy:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.firewall_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
location=dict(required=True),
source_account_alias=dict(required=True),
destination_account_alias=dict(),
firewall_policy_id=dict(),
ports=dict(type='list', elements='str'),
source=dict(type='list', elements='str'),
destination=dict(type='list', elements='str'),
wait=dict(default=True), # @FIXME type=bool
state=dict(default='present', choices=['present', 'absent']),
enabled=dict(default=True, choices=[True, False])
)
return argument_spec
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
firewall_policy = None
location = self.module.params.get('location')
source_account_alias = self.module.params.get('source_account_alias')
destination_account_alias = self.module.params.get(
'destination_account_alias')
firewall_policy_id = self.module.params.get('firewall_policy_id')
ports = self.module.params.get('ports')
source = self.module.params.get('source')
destination = self.module.params.get('destination')
wait = self.module.params.get('wait')
state = self.module.params.get('state')
enabled = self.module.params.get('enabled')
self.firewall_dict = {
'location': location,
'source_account_alias': source_account_alias,
'destination_account_alias': destination_account_alias,
'firewall_policy_id': firewall_policy_id,
'ports': ports,
'source': source,
'destination': destination,
'wait': wait,
'state': state,
'enabled': enabled}
self._set_clc_credentials_from_env()
if state == 'absent':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
source_account_alias, location, self.firewall_dict)
elif state == 'present':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
source_account_alias, location, self.firewall_dict)
return self.module.exit_json(
changed=changed,
firewall_policy_id=firewall_policy_id,
firewall_policy=firewall_policy)
@staticmethod
def _get_policy_id_from_response(response):
"""
Method to parse out the policy id from creation response
:param response: response from firewall creation API call
:return: policy_id: firewall policy id from creation call
"""
url = response.get('links')[0]['href']
path = urlparse(url).path
path_list = os.path.split(path)
policy_id = path_list[-1]
return policy_id
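    # Worked example using the sample policy from RETURN: for a link href of
    # 'http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05',
    # urlparse(url).path keeps only the path portion, and os.path.split(path)
    # yields ('/v2-experimental/firewallPolicies/wfad/uc1',
    # 'fc36f1bfd47242e488a9c44346438c05'), so the trailing segment is the
    # policy id.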
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_firewall_policy_is_present(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: (changed, firewall_policy_id, firewall_policy)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was created/updated
firewall_policy: The firewall_policy object
"""
firewall_policy = None
firewall_policy_id = firewall_dict.get('firewall_policy_id')
if firewall_policy_id is None:
if not self.module.check_mode:
response = self._create_firewall_policy(
source_account_alias,
location,
firewall_dict)
firewall_policy_id = self._get_policy_id_from_response(
response)
changed = True
else:
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if not firewall_policy:
return self.module.fail_json(
msg='Unable to find the firewall policy id : {0}'.format(
firewall_policy_id))
changed = self._compare_get_request_with_dict(
firewall_policy,
firewall_dict)
if not self.module.check_mode and changed:
self._update_firewall_policy(
source_account_alias,
location,
firewall_policy_id,
firewall_dict)
if changed and firewall_policy_id:
firewall_policy = self._wait_for_requests_to_complete(
source_account_alias,
location,
firewall_policy_id)
return changed, firewall_policy_id, firewall_policy
def _ensure_firewall_policy_is_absent(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is removed if present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: firewall policy to delete
:return: (changed, firewall_policy_id, response)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was deleted
response: response from CLC API call
"""
changed = False
response = []
firewall_policy_id = firewall_dict.get('firewall_policy_id')
result = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if result:
if not self.module.check_mode:
response = self._delete_firewall_policy(
source_account_alias,
location,
firewall_policy_id)
changed = True
return changed, firewall_policy_id, response
def _create_firewall_policy(
self,
source_account_alias,
location,
firewall_dict):
"""
Creates the firewall policy for the given account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response from CLC API call
"""
payload = {
'destinationAccount': firewall_dict.get('destination_account_alias'),
'source': firewall_dict.get('source'),
'destination': firewall_dict.get('destination'),
'ports': firewall_dict.get('ports')}
try:
response = self.clc.v2.API.Call(
'POST', '/v2-experimental/firewallPolicies/%s/%s' %
(source_account_alias, location), payload)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to create firewall policy. %s" %
str(e.response_text))
return response
def _delete_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Deletes a given firewall policy for an account alias in a datacenter
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to delete
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to delete the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _update_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id,
firewall_dict):
"""
Updates a firewall policy for a given datacenter and account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to update
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'PUT',
'/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias,
location,
firewall_policy_id),
firewall_dict)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to update the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
@staticmethod
def _compare_get_request_with_dict(response, firewall_dict):
"""
Helper method to compare the json response for getting the firewall policy with the request parameters
:param response: response from the get method
:param firewall_dict: dictionary of request parameters for firewall policy
:return: changed: Boolean that returns true if there are differences between
the response parameters and the playbook parameters
"""
changed = False
response_dest_account_alias = response.get('destinationAccount')
response_enabled = response.get('enabled')
response_source = response.get('source')
response_dest = response.get('destination')
response_ports = response.get('ports')
request_dest_account_alias = firewall_dict.get(
'destination_account_alias')
request_enabled = firewall_dict.get('enabled')
if request_enabled is None:
request_enabled = True
request_source = firewall_dict.get('source')
request_dest = firewall_dict.get('destination')
request_ports = firewall_dict.get('ports')
if (
response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
response_enabled != request_enabled) or (
response_source and response_source != request_source) or (
response_dest and response_dest != request_dest) or (
response_ports and response_ports != request_ports):
changed = True
return changed
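    # Illustration (hypothetical values): a GET response of
    #   {'destinationAccount': 'wfad', 'enabled': True,
    #    'source': ['10.1.1.0/24'], 'destination': ['10.2.2.0/24'], 'ports': ['any']}
    # compared against a playbook that differs only in ports (say ['TCP/80'])
    # yields changed == True; identical values yield changed == False. Apart
    # from 'enabled', which is always compared, fields that are empty or
    # missing in the response are skipped rather than compared.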
def _get_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Get back details for a particular firewall policy
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: id of the firewall policy to get
:return: response - The response from CLC API call
"""
response = None
try:
response = self.clc.v2.API.Call(
'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
if e.response_status_code != 404:
self.module.fail_json(
msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _wait_for_requests_to_complete(
self,
source_account_alias,
location,
firewall_policy_id,
wait_limit=50):
"""
Waits until the CLC requests are complete if the wait argument is True
:param source_account_alias: The source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: The firewall policy id
:param wait_limit: The number of times to check the status for completion
:return: the firewall_policy object
"""
wait = self.module.params.get('wait')
count = 0
firewall_policy = None
while wait:
count += 1
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
status = firewall_policy.get('status')
if status == 'active' or count > wait_limit:
wait = False
else:
# wait for 2 seconds
sleep(2)
return firewall_policy
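    # Polling budget: with the default wait_limit of 50 and a 2 second sleep
    # between checks, the loop gives up after roughly 100 seconds and returns
    # whatever status the last GET reported.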
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
supports_check_mode=True)
clc_firewall = ClcFirewallPolicy(module)
clc_firewall.process_request()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,522 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
    - Create or delete Server Groups at CenturyLink Cloud
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- The name of the Server Group
type: str
required: true
description:
description:
- A description of the Server Group
type: str
required: false
parent:
description:
- The parent group of the server group. If parent is not provided, it creates the group at top level.
type: str
required: false
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
type: str
required: false
state:
description:
- Whether to create or delete the group
type: str
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
type: bool
default: true
required: false
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      Centurylink Cloud
    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Create a Server Group
---
- name: Create Server Group
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
community.general.clc_group:
name: My Cool Server Group
parent: Default Group
state: present
register: clc
- name: Debug
ansible.builtin.debug:
var: clc
# Delete a Server Group
- name: Delete Server Group
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
community.general.clc_group:
name: My Cool Server Group
parent: Default Group
state: absent
register: clc
- name: Debug
ansible.builtin.debug:
var: clc
'''
RETURN = '''
group:
description: The group information
returned: success
type: dict
sample:
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":"2015-07-29T18:52:47Z",
"modifiedBy":"service.wfad",
"modifiedDate":"2015-07-29T18:52:47Z"
},
"customFields":[
],
"description":"test group",
"groups":[
],
"id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
"links":[
{
"href":"/v2/groups/wfad",
"rel":"createGroup",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad",
"rel":"createServer",
"verbs":[
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"parentGroup"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
"rel":"defaults",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
"rel":"billing"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
"rel":"archiveGroupAction"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
"rel":"statistics"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
"rel":"horizontalAutoscalePolicyMapping",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
}
],
"locationId":"UC1",
"name":"test group",
"status":"active",
"type":"default"
}
'''
__version__ = '${version}'
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcGroup(object):
clc = None
root_group = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
datacenter=location)
if state == "absent":
changed, group, requests = self._ensure_group_is_absent(
group_name=group_name, parent_name=parent_name)
if requests:
self._wait_for_requests_to_complete(requests)
else:
changed, group = self._ensure_group_is_present(
group_name=group_name, parent_name=parent_name, group_description=group_description)
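        # `group` is only a clc_sdk Group object (with a .data payload) when an
        # existing group was found or created; in check mode, or on the absent
        # path, it is a plain name or list of names, so fall back to reporting
        # the name.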
try:
group = group.data
except AttributeError:
group = group_name
self.module.exit_json(changed=changed, group=group)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(),
parent=dict(),
location=dict(),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=True))
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_group_is_absent(self, group_name, parent_name):
"""
Ensure that group_name is absent by deleting it if necessary
:param group_name: string - the name of the clc server group to delete
:param parent_name: string - the name of the parent group for group_name
:return: changed, group
"""
changed = False
group = []
results = []
if self._group_exists(group_name=group_name, parent_name=parent_name):
if not self.module.check_mode:
group.append(group_name)
result = self._delete_group(group_name)
results.append(result)
changed = True
return changed, group, results
def _delete_group(self, group_name):
"""
Delete the provided server group
:param group_name: string - the server group to delete
:return: none
"""
response = None
group, parent = self.group_dict.get(group_name)
try:
response = group.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
group_name, ex.response_text
))
return response
def _ensure_group_is_present(
self,
group_name,
parent_name,
group_description):
"""
Checks to see if a server group exists, creates it if it doesn't.
:param group_name: the name of the group to validate/create
:param parent_name: the name of the parent group for group_name
:param group_description: a short description of the server group (used when creating)
:return: (changed, group) -
changed: Boolean- whether a change was made,
group: A clc group object for the group
"""
if not self.root_group:
raise AssertionError("Implementation Error: Root Group not set")
parent = parent_name if parent_name is not None else self.root_group.name
description = group_description
changed = False
group = group_name
parent_exists = self._group_exists(group_name=parent, parent_name=None)
child_exists = self._group_exists(
group_name=group_name,
parent_name=parent)
if parent_exists and child_exists:
group, parent = self.group_dict[group_name]
changed = False
elif parent_exists and not child_exists:
if not self.module.check_mode:
group = self._create_group(
group=group,
parent=parent,
description=description)
changed = True
else:
self.module.fail_json(
msg="parent group: " +
parent +
" does not exist")
return changed, group
def _create_group(self, group, parent, description):
"""
Create the provided server group
:param group: clc_sdk.Group - the group to create
:param parent: clc_sdk.Parent - the parent group for {group}
:param description: string - a text description of the group
:return: clc_sdk.Group - the created group
"""
response = None
(parent, grandparent) = self.group_dict[parent]
try:
response = parent.Create(name=group, description=description)
except CLCException as ex:
self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
group, ex.response_text))
return response
def _group_exists(self, group_name, parent_name):
"""
Check to see if a group exists
:param group_name: string - the group to check
:param parent_name: string - the parent of group_name
:return: boolean - whether the group exists
"""
result = False
if group_name in self.group_dict:
(group, parent) = self.group_dict[group_name]
if parent_name is None or parent_name == parent.name:
result = True
return result
def _get_group_tree_for_datacenter(self, datacenter=None):
"""
Walk the tree of groups for a datacenter
:param datacenter: string - the datacenter to walk (ex: 'UC1')
:return: a dictionary of groups and parents
"""
self.root_group = self.clc.v2.Datacenter(
location=datacenter).RootGroup()
return self._walk_groups_recursive(
parent_group=None,
child_group=self.root_group)
def _walk_groups_recursive(self, parent_group, child_group):
"""
Walk a parent-child tree of groups, starting with the provided child group
:param parent_group: clc_sdk.Group - the parent group to start the walk
:param child_group: clc_sdk.Group - the child group to start the walk
:return: a dictionary of groups and parents
"""
result = {str(child_group): (child_group, parent_group)}
groups = child_group.Subgroups().groups
if len(groups) > 0:
for group in groups:
if group.type != 'default':
continue
result.update(self._walk_groups_recursive(child_group, group))
return result
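    # Shape of the result for a hypothetical two-level tree where the root
    # group 'Default Group' contains 'My Cool Server Group':
    #   {'Default Group': (<root Group>, None),
    #    'My Cool Server Group': (<child Group>, <root Group>)}
    # i.e. each name maps to a (group, parent) tuple, which is what
    # _group_exists and _delete_group later unpack. Subgroups whose type is
    # not 'default' are skipped.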
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process group request')
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcGroup._define_module_argument_spec(),
supports_check_mode=True)
clc_group = ClcGroup(module)
clc_group.process_request()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,945 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_loadbalancer
short_description: Create or delete shared loadbalancers in CenturyLink Cloud
description:
    - An Ansible module to create or delete shared loadbalancers in CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- The name of the loadbalancer
type: str
required: true
description:
description:
- A description for the loadbalancer
type: str
alias:
description:
- The alias of your CLC Account
type: str
required: true
location:
description:
    - The location of the datacenter where the load balancer resides
type: str
required: true
method:
description:
    - The balancing method for the load balancer pool
type: str
choices: ['leastConnection', 'roundRobin']
persistence:
description:
- The persistence method for the load balancer
type: str
choices: ['standard', 'sticky']
port:
description:
- Port to configure on the public-facing side of the load balancer pool
type: str
choices: ['80', '443']
nodes:
description:
    - A list of nodes that need to be added to the load balancer pool
type: list
default: []
elements: dict
status:
description:
- The status of the loadbalancer
type: str
default: enabled
choices: ['enabled', 'disabled']
state:
description:
- Whether to create or delete the load balancer pool
type: str
default: present
choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      Centurylink Cloud
    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Create Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Create things
community.general.clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: present
- name: Add node to an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
community.general.clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_present
- name: Remove node from an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
community.general.clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_absent
- name: Delete LoadbalancerPool
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
community.general.clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: port_absent
- name: Delete Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
community.general.clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: absent
'''
RETURN = '''
loadbalancer:
description: The load balancer result object from CLC
returned: success
type: dict
sample:
{
"description":"test-lb",
"id":"ab5b18cb81e94ab9925b61d1ca043fb5",
"ipAddress":"66.150.174.197",
"links":[
{
"href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
"rel":"pools",
"verbs":[
"GET",
"POST"
]
}
],
"name":"test-lb",
"pools":[
],
"status":"enabled"
}
'''
__version__ = '${version}'
import json
import os
import traceback
from time import sleep
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcLoadBalancer:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.lb_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
result_lb = None
loadbalancer_name = self.module.params.get('name')
loadbalancer_alias = self.module.params.get('alias')
loadbalancer_location = self.module.params.get('location')
loadbalancer_description = self.module.params.get('description')
loadbalancer_port = self.module.params.get('port')
loadbalancer_method = self.module.params.get('method')
loadbalancer_persistence = self.module.params.get('persistence')
loadbalancer_nodes = self.module.params.get('nodes')
loadbalancer_status = self.module.params.get('status')
state = self.module.params.get('state')
if loadbalancer_description is None:
loadbalancer_description = loadbalancer_name
self._set_clc_credentials_from_env()
self.lb_dict = self._get_loadbalancer_list(
alias=loadbalancer_alias,
location=loadbalancer_location)
if state == 'present':
changed, result_lb, lb_id = self.ensure_loadbalancer_present(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location,
description=loadbalancer_description,
status=loadbalancer_status)
if loadbalancer_port:
changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
lb_id=lb_id,
alias=loadbalancer_alias,
location=loadbalancer_location,
method=loadbalancer_method,
persistence=loadbalancer_persistence,
port=loadbalancer_port)
if loadbalancer_nodes:
changed, result_nodes = self.ensure_lbpool_nodes_set(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'absent':
changed, result_lb = self.ensure_loadbalancer_absent(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location)
elif state == 'port_absent':
changed, result_lb = self.ensure_loadbalancerpool_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port)
elif state == 'nodes_present':
changed, result_lb = self.ensure_lbpool_nodes_present(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'nodes_absent':
changed, result_lb = self.ensure_lbpool_nodes_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
self.module.exit_json(changed=changed, loadbalancer=result_lb)
def ensure_loadbalancer_present(
self, name, alias, location, description, status):
"""
Checks to see if a load balancer exists and creates one if it does not.
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description of loadbalancer
:param status: Enabled / Disabled
:return: (changed, result, lb_id)
changed: Boolean whether a change was made
result: The result object from the CLC load balancer request
lb_id: The load balancer id
"""
changed = False
result = name
lb_id = self._loadbalancer_exists(name=name)
if not lb_id:
if not self.module.check_mode:
result = self.create_loadbalancer(name=name,
alias=alias,
location=location,
description=description,
status=status)
lb_id = result.get('id')
changed = True
return changed, result, lb_id
def ensure_loadbalancerpool_present(
self, lb_id, alias, location, method, persistence, port):
"""
Checks to see if a load balancer pool exists and creates one if it does not.
:param lb_id: The loadbalancer id
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: (changed, group, pool_id) -
changed: Boolean whether a change was made
result: The result from the CLC API call
pool_id: The string id of the load balancer pool
"""
changed = False
result = port
if not lb_id:
return changed, None, None
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if not pool_id:
if not self.module.check_mode:
result = self.create_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
method=method,
persistence=persistence,
port=port)
pool_id = result.get('id')
changed = True
return changed, result, pool_id
def ensure_loadbalancer_absent(self, name, alias, location):
"""
Checks to see if a load balancer exists and deletes it if it does
:param name: Name of the load balancer
:param alias: Alias of account
:param location: Datacenter
:return: (changed, result)
changed: Boolean whether a change was made
result: The result from the CLC API Call
"""
changed = False
result = name
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
if not self.module.check_mode:
result = self.delete_loadbalancer(alias=alias,
location=location,
name=name)
changed = True
return changed, result
def ensure_loadbalancerpool_absent(self, alias, location, name, port):
"""
Checks to see if a load balancer pool exists and deletes it if it does
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer listens on
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = None
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed = True
if not self.module.check_mode:
result = self.delete_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id)
else:
result = "Pool doesn't exist"
else:
result = "LB Doesn't Exist"
return changed, result
def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
"""
        Checks to see if the provided list of nodes exists for the pool
        and sets the nodes if any in the list are missing
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: The list of nodes to be updated to the pool
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
result = {}
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_check=nodes)
if not nodes_exist:
changed = True
result = self.set_loadbalancernodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
"""
        Checks to see if the provided list of nodes exists for the pool and adds the missing nodes to the pool
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be added
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.add_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_add=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
"""
        Checks to see if the provided list of nodes exists for the pool and removes any that are found
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be removed
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.remove_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_remove=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def create_loadbalancer(self, name, alias, location, description, status):
"""
        Create a loadbalancer with the given parameters
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description for loadbalancer to be created
:param status: Enabled / Disabled
:return: result: The result from the CLC API call
"""
result = None
try:
result = self.clc.v2.API.Call('POST',
'/v2/sharedLoadBalancers/%s/%s' % (alias,
location),
json.dumps({"name": name,
"description": description,
"status": status}))
sleep(1)
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def create_loadbalancerpool(
self, alias, location, lb_id, method, persistence, port):
"""
Creates a pool on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: result: The result from the create API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id), json.dumps(
{
"port": port, "method": method, "persistence": persistence
}))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def delete_loadbalancer(self, alias, location, name):
"""
Delete CLC loadbalancer
:param alias: Alias for account
:param location: Datacenter
:param name: Name of the loadbalancer to delete
:return: result: The result from the CLC API call
"""
result = None
lb_id = self._get_loadbalancer_id(name=name)
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
(alias, location, lb_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
"""
Delete the pool on the provided load balancer
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the load balancer pool
:return: result: The result from the delete API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
(alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def _get_loadbalancer_id(self, name):
"""
Retrieves unique ID of loadbalancer
:param name: Name of loadbalancer
:return: Unique ID of the loadbalancer
"""
id = None
for lb in self.lb_dict:
if lb.get('name') == name:
id = lb.get('id')
return id
def _get_loadbalancer_list(self, alias, location):
"""
Retrieve a list of loadbalancers
:param alias: Alias for account
:param location: Datacenter
:return: JSON data for all loadbalancers at datacenter
"""
result = None
try:
result = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch load balancers for account: {0}. {1}'.format(
alias, str(e.response_text)))
return result
def _loadbalancer_exists(self, name):
"""
Verify a loadbalancer exists
:param name: Name of loadbalancer
:return: False or the ID of the existing loadbalancer
"""
result = False
for lb in self.lb_dict:
if lb.get('name') == name:
result = lb.get('id')
return result
def _loadbalancerpool_exists(self, alias, location, port, lb_id):
"""
Checks to see if a pool exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param port: the port to check and see if it exists
:param lb_id: the id string of the provided load balancer
:return: result: The id string of the pool or False
"""
result = False
try:
pool_list = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id))
except APIFailedResponse as e:
return self.module.fail_json(
                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
lb_id, str(e.response_text)))
for pool in pool_list:
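            # The API may report the port as a string while the module may
            # pass an int (or the reverse), so normalise both to int before
            # comparing.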
if int(pool.get('port')) == int(port):
result = pool.get('id')
return result
def _loadbalancerpool_nodes_exists(
self, alias, location, lb_id, pool_id, nodes_to_check):
"""
Checks to see if a set of nodes exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the provided load balancer
:param pool_id: the id string of the load balancer pool
:param nodes_to_check: the list of nodes to check for
:return: result: True / False indicating if the given nodes exist
"""
        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
        for node in nodes_to_check:
            if not node.get('status'):
                node['status'] = 'enabled'
            # Every requested node must already be present in the pool.
            if node not in nodes:
                return False
        return True
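    # Note that membership is tested on whole node dicts: a request node such
    # as {'ipAddress': '10.11.22.123', 'privatePort': 80} (hypothetical
    # values) only matches a pool node if every field, including the
    # defaulted 'status': 'enabled', is identical.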
def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
"""
Updates nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes: a list of dictionaries containing the nodes to set
:return: result: The result from the CLC API call
"""
result = None
if not lb_id:
return result
if not self.module.check_mode:
try:
result = self.clc.v2.API.Call('PUT',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id), json.dumps(nodes))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
pool_id, str(e.response_text)))
return result
def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
"""
Add nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_add: a list of dictionaries containing the nodes to add
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_add:
if not node.get('status'):
node['status'] = 'enabled'
if node not in nodes:
changed = True
nodes.append(node)
if changed is True and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def remove_lbpool_nodes(
self, alias, location, lb_id, pool_id, nodes_to_remove):
"""
Removes nodes from the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_remove: a list of dictionaries containing the nodes to remove
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_remove:
if not node.get('status'):
node['status'] = 'enabled'
if node in nodes:
changed = True
nodes.remove(node)
if changed is True and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
"""
Return the list of nodes available to the provided load balancer pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:return: result: The list of nodes
"""
result = None
try:
result = self.clc.v2.API.Call('GET',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
pool_id, str(e.response_text)))
return result
@staticmethod
def define_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(),
location=dict(required=True),
alias=dict(required=True),
port=dict(choices=[80, 443]),
method=dict(choices=['leastConnection', 'roundRobin']),
persistence=dict(choices=['standard', 'sticky']),
nodes=dict(type='list', default=[], elements='dict'),
status=dict(default='enabled', choices=['enabled', 'disabled']),
state=dict(
default='present',
choices=[
'present',
'absent',
'port_absent',
'nodes_present',
'nodes_absent'])
)
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
supports_check_mode=True)
clc_loadbalancer = ClcLoadBalancer(module)
clc_loadbalancer.process_request()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,975 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_modify_server
short_description: Modify servers in CenturyLink Cloud
description:
- An Ansible module to modify servers in CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
server_ids:
description:
- A list of server Ids to modify.
type: list
required: true
elements: str
cpu:
description:
- How many CPUs to update on the server
type: str
memory:
description:
    - Memory (in GB) to set on the server.
type: str
anti_affinity_policy_id:
description:
- The anti affinity policy id to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_name'
type: str
anti_affinity_policy_name:
description:
- The anti affinity policy name to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_id'
type: str
alert_policy_id:
description:
- The alert policy id to be associated to the server.
This is mutually exclusive with 'alert_policy_name'
type: str
alert_policy_name:
description:
- The alert policy name to be associated to the server.
This is mutually exclusive with 'alert_policy_id'
type: str
state:
description:
    - The state to ensure that the provided resources are in.
type: str
default: 'present'
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
type: bool
default: true
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Set the cpu count to 4 on a server
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 4
state: present
- name: Set the memory to 8GB on a server
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
memory: 8
state: present
- name: Set the anti affinity policy on a server
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: present
- name: Remove the anti affinity policy on a server
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: absent
- name: Add the alert policy on a server
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: present
- name: Remove the alert policy on a server
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: absent
- name: Set the memory to 16 GB and CPU to 8 cores on a list of servers
community.general.clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 8
memory: 16
state: present
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects that are changed
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
import json
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcModifyServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
p = self.module.params
cpu = p.get('cpu')
memory = p.get('memory')
state = p.get('state')
if state == 'absent' and (cpu or memory):
return self.module.fail_json(
msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to modify: %s' %
server_ids)
(changed, server_dict_array, changed_server_ids) = self._modify_servers(
server_ids=server_ids)
self.module.exit_json(
changed=changed,
server_ids=changed_server_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True, elements='str'),
state=dict(default='present', choices=['present', 'absent']),
cpu=dict(),
memory=dict(),
anti_affinity_policy_id=dict(),
anti_affinity_policy_name=dict(),
alert_policy_id=dict(),
alert_policy_name=dict(),
wait=dict(type='bool', default=True)
)
mutually_exclusive = [
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name']
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: The list of server ids
:param message: the error message to throw in case of any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
return self.module.fail_json(msg=message + ': %s' % ex.message)
def _modify_servers(self, server_ids):
"""
modify the servers configuration on the provided list
:param server_ids: list of servers to modify
:return: a list of dictionaries with server information about the servers that were modified
"""
p = self.module.params
state = p.get('state')
server_params = {
'cpu': p.get('cpu'),
'memory': p.get('memory'),
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
'alert_policy_id': p.get('alert_policy_id'),
'alert_policy_name': p.get('alert_policy_name'),
}
changed = False
server_changed = False
aa_changed = False
ap_changed = False
server_dict_array = []
result_server_ids = []
request_list = []
changed_servers = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return self.module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
for server in servers:
if state == 'present':
server_changed, server_result = self._ensure_server_config(
server, server_params)
if server_result:
request_list.append(server_result)
aa_changed = self._ensure_aa_policy_present(
server,
server_params)
ap_changed = self._ensure_alert_policy_present(
server,
server_params)
elif state == 'absent':
aa_changed = self._ensure_aa_policy_absent(
server,
server_params)
ap_changed = self._ensure_alert_policy_absent(
server,
server_params)
if server_changed or aa_changed or ap_changed:
changed_servers.append(server)
changed = True
self._wait_for_requests(self.module, request_list)
self._refresh_servers(self.module, changed_servers)
for server in changed_servers:
server_dict_array.append(server.data)
result_server_ids.append(server.id)
return changed, server_dict_array, result_server_ids
def _ensure_server_config(
self, server, server_params):
"""
ensures the server is updated with the provided cpu and memory
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
cpu = server_params.get('cpu')
memory = server_params.get('memory')
changed = False
result = None
if not cpu:
cpu = server.cpu
if not memory:
memory = server.memory
if memory != server.memory or cpu != server.cpu:
if not self.module.check_mode:
result = self._modify_clc_server(
self.clc,
self.module,
server.id,
cpu,
memory)
changed = True
return changed, result
@staticmethod
def _modify_clc_server(clc, module, server_id, cpu, memory):
"""
Modify the memory or CPU of a clc server.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param server_id: id of the server to modify
:param cpu: the new cpu value
:param memory: the new memory value
:return: the result of CLC API call
"""
result = None
acct_alias = clc.v2.Account.GetAlias()
try:
# Update the server configuration
job_obj = clc.v2.API.Call('PATCH',
'servers/%s/%s' % (acct_alias,
server_id),
json.dumps([{"op": "set",
"member": "memory",
"value": memory},
{"op": "set",
"member": "cpu",
"value": cpu}]))
result = clc.v2.Requests(job_obj)
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to update the server configuration for server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
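# Illustrative sketch (comments only): for cpu=4 and memory=8, the PATCH body
# built by _modify_clc_server() above is the following JSON document, posted
# to 'servers/<account alias>/<server id>':
#
#   [{"op": "set", "member": "memory", "value": 8},
#    {"op": "set", "member": "cpu", "value": 4}]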
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process modify server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
def _ensure_aa_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided anti affinity policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed - Boolean flag indicating whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id != current_aa_policy_id:
self._modify_aa_policy(
self.clc,
self.module,
acct_alias,
server.id,
aa_policy_id)
changed = True
return changed
def _ensure_aa_policy_absent(
self, server, server_params):
"""
ensures the provided anti affinity policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed - Boolean flag indicating whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id == current_aa_policy_id:
self._delete_aa_policy(
self.clc,
self.module,
acct_alias,
server.id)
changed = True
return changed
@staticmethod
def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
"""
modifies the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param aa_policy_id: the anti affinity policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('PUT',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({"id": aa_policy_id}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _delete_aa_policy(clc, module, acct_alias, server_id):
"""
Delete the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(
msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
alias, str(ex.response_text)))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _get_aa_policy_id_of_server(clc, module, alias, server_id):
"""
retrieves the anti affinity policy id of the server based on the CLC server id
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param server_id: the CLC server id
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
result = clc.v2.API.Call(
method='GET', url='servers/%s/%s/antiAffinityPolicy' %
(alias, server_id))
aa_policy_id = result.get('id')
except APIFailedResponse as ex:
if ex.response_status_code != 404:
module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
server_id, str(ex.response_text)))
return aa_policy_id
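# Note on the 404 handling above: the antiAffinityPolicy endpoint returns
# 404 when no policy is attached to the server, so that case yields
# aa_policy_id = None rather than a module failure.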
def _ensure_alert_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided alert policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed - Boolean flag indicating whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and not self._alert_policy_exists(
server, alert_policy_id):
self._add_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
def _ensure_alert_policy_absent(
self, server, server_params):
"""
ensures the alert policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed - Boolean flag indicating whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and self._alert_policy_exists(
server, alert_policy_id):
self._remove_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
@staticmethod
def _add_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
add the alert policy to CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('POST',
'servers/%s/%s/alertPolicies' % (
acct_alias,
server_id),
json.dumps({"id": alert_policy_id}))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _remove_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
remove the alert policy to the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/alertPolicies/%s'
% (acct_alias, server_id, alert_policy_id))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
retrieves the alert policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
try:
alert_policies = clc.v2.API.Call(method='GET',
url='alertPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
alias, str(ex.response_text)))
for alert_policy in alert_policies.get('items'):
if alert_policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = alert_policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _alert_policy_exists(server, alert_policy_id):
"""
Checks if the alert policy exists for the server
:param server: the clc server object
:param alert_policy_id: the alert policy
:return: True: if the given alert policy id associated to the server, False otherwise
"""
result = False
alert_policies = server.alertPolicies
if alert_policies:
for alert_policy in alert_policies:
if alert_policy.get('id') == alert_policy_id:
result = True
return result
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcModifyServer._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_modify_server = ClcModifyServer(module)
clc_modify_server.process_request()
if __name__ == '__main__':
main()


@ -0,0 +1,369 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_publicip
short_description: Add and delete public IPs on servers in CenturyLink Cloud
description:
- An Ansible module to add or delete public IP addresses on an existing server or servers in CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
protocol:
description:
- The protocol that the public IP will listen for.
type: str
default: TCP
choices: ['TCP', 'UDP', 'ICMP']
ports:
description:
- A list of ports to expose. This is required when state is 'present'.
type: list
elements: int
server_ids:
description:
- A list of servers to create public ips on.
type: list
required: true
elements: str
state:
description:
- Determine whether to create or delete public IPs. If present, the module will not create a second public IP if one
already exists.
type: str
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
type: bool
default: true
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Add Public IP to Server
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Create Public IP For Servers
community.general.clc_publicip:
protocol: TCP
ports:
- 80
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: present
register: clc
- name: Debug
ansible.builtin.debug:
var: clc
- name: Delete Public IP from Server
hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Create Public IP For Servers
community.general.clc_publicip:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: absent
register: clc
- name: Debug
ansible.builtin.debug:
var: clc
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcPublicIp(object):
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
params = self.module.params
server_ids = params['server_ids']
ports = params['ports']
protocol = params['protocol']
state = params['state']
if state == 'present':
changed, changed_server_ids, requests = self.ensure_public_ip_present(
server_ids=server_ids, protocol=protocol, ports=ports)
elif state == 'absent':
changed, changed_server_ids, requests = self.ensure_public_ip_absent(
server_ids=server_ids)
else:
return self.module.fail_json(msg="Unknown State: " + state)
self._wait_for_requests_to_complete(requests)
return self.module.exit_json(changed=changed,
server_ids=changed_server_ids)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True, elements='str'),
protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
ports=dict(type='list', elements='int'),
wait=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
)
return argument_spec
def ensure_public_ip_present(self, server_ids, protocol, ports):
"""
Ensures the given server ids have a public ip available
:param server_ids: the list of server ids
:param protocol: the ip protocol
:param ports: the list of ports to expose
:return: (changed, changed_server_ids, results)
changed: A flag indicating if there is any change
changed_server_ids : the list of server ids that are changed
results: The result list from clc public ip call
"""
changed = False
results = []
changed_server_ids = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.PublicIPs().public_ips) == 0]
ports_to_expose = [{'protocol': protocol, 'port': port}
for port in ports]
for server in servers_to_change:
if not self.module.check_mode:
result = self._add_publicip_to_server(server, ports_to_expose)
results.append(result)
changed_server_ids.append(server.id)
changed = True
return changed, changed_server_ids, results
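# Illustrative sketch (comments only): for protocol='TCP' and ports=[80, 443],
# the ports_to_expose payload built above is:
#
#   [{'protocol': 'TCP', 'port': 80}, {'protocol': 'TCP', 'port': 443}]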
def _add_publicip_to_server(self, server, ports_to_expose):
result = None
try:
result = server.PublicIPs().Add(ports_to_expose)
except CLCException as ex:
self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_public_ip_absent(self, server_ids):
"""
Ensures the given server ids have the public ip removed if there is any
:param server_ids: the list of server ids
:return: (changed, changed_server_ids, results)
changed: A flag indicating if there is any change
changed_server_ids : the list of server ids that are changed
results: The result list from clc public ip call
"""
changed = False
results = []
changed_server_ids = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.PublicIPs().public_ips) > 0]
for server in servers_to_change:
if not self.module.check_mode:
result = self._remove_publicip_from_server(server)
results.append(result)
changed_server_ids.append(server.id)
changed = True
return changed, changed_server_ids, results
def _remove_publicip_from_server(self, server):
result = None
try:
for ip_address in server.PublicIPs().public_ips:
result = ip_address.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process public ip request')
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_ids, message):
"""
Gets the list of servers from the CLC API
"""
try:
return self.clc.v2.Servers(server_ids).servers
except CLCException as exception:
self.module.fail_json(msg=message + ': %s' % exception)
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcPublicIp._define_module_argument_spec(),
supports_check_mode=True
)
clc_public_ip = ClcPublicIp(module)
clc_public_ip.process_request()
if __name__ == '__main__':
main()

File diff suppressed because it is too large


@ -0,0 +1,419 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_server_snapshot
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud
description:
- An Ansible module to create, delete and restore server snapshots in CenturyLink Cloud.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
server_ids:
description:
- The list of CLC server Ids.
type: list
required: true
elements: str
expiration_days:
description:
- The number of days to keep the server snapshot before it expires.
type: int
default: 7
required: false
state:
description:
- The state to ensure that the provided resources are in.
type: str
default: 'present'
required: false
choices: ['present', 'absent', 'restore']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: 'True'
required: false
type: str
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Create server snapshot
community.general.clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
expiration_days: 10
wait: true
state: present
- name: Restore server snapshot
community.general.clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: true
state: restore
- name: Delete server snapshot
community.general.clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: true
state: absent
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
import traceback
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcSnapshot:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
server_ids = p['server_ids']
expiration_days = p['expiration_days']
state = p['state']
request_list = []
changed = False
changed_servers = []
self._set_clc_credentials_from_env()
if state == 'present':
changed, request_list, changed_servers = self.ensure_server_snapshot_present(
server_ids=server_ids,
expiration_days=expiration_days)
elif state == 'absent':
changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
server_ids=server_ids)
elif state == 'restore':
changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
server_ids=server_ids)
self._wait_for_requests_to_complete(request_list)
return self.module.exit_json(
changed=changed,
server_ids=changed_servers)
def ensure_server_snapshot_present(self, server_ids, expiration_days):
"""
Ensures the given set of server_ids have the snapshots created
:param server_ids: The list of server_ids to create the snapshot
:param expiration_days: The number of days to keep the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
changed_servers: The list of servers ids that are modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) == 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._create_server_snapshot(server, expiration_days)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _create_server_snapshot(self, server, expiration_days):
"""
Create the snapshot for the CLC server
:param server: the CLC server object
:param expiration_days: The number of days to keep the snapshot
:return: the create request object from CLC API Call
"""
result = None
try:
result = server.CreateSnapshot(
delete_existing=True,
expiration_days=expiration_days)
except CLCException as ex:
self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_server_snapshot_absent(self, server_ids):
"""
Ensures the given set of server_ids have the snapshots removed
:param server_ids: The list of server_ids to delete the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
changed_servers: The list of servers ids that are modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) > 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._delete_server_snapshot(server)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _delete_server_snapshot(self, server):
"""
Delete snapshot for the CLC server
:param server: the CLC server object
:return: the delete snapshot request object from CLC API
"""
result = None
try:
result = server.DeleteSnapshot()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_server_snapshot_restore(self, server_ids):
"""
Ensures the given set of server_ids have the snapshots restored
:param server_ids: The list of server_ids to delete the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
changed_servers: The list of servers ids that are modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) > 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._restore_server_snapshot(server)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _restore_server_snapshot(self, server):
"""
Restore snapshot for the CLC server
:param server: the CLC server object
:return: the restore snapshot request object from CLC API
"""
result = None
try:
result = server.RestoreSnapshot()
except CLCException as ex:
self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process server snapshot request')
@staticmethod
def define_argument_spec():
"""
This function defines the dictionary object required for
package module
:return: the package dictionary object
"""
argument_spec = dict(
server_ids=dict(type='list', required=True, elements='str'),
expiration_days=dict(default=7, type='int'),
wait=dict(default=True),
state=dict(
default='present',
choices=[
'present',
'absent',
'restore']),
)
return argument_spec
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: The list of server ids
:param message: The error message to throw in case of any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
return self.module.fail_json(msg=message + ': %s' % ex)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
Main function
:return: None
"""
module = AnsibleModule(
argument_spec=ClcSnapshot.define_argument_spec(),
supports_check_mode=True
)
clc_snapshot = ClcSnapshot(module)
clc_snapshot.process_request()
if __name__ == '__main__':
main()


@ -0,0 +1,133 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: cloud_init_data_facts
short_description: Retrieve facts of cloud-init
description:
- Gathers facts by reading the status.json and result.json of cloud-init.
author: René Moser (@resmo)
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.facts
- community.general.attributes.facts_module
options:
filter:
description:
- Filter facts.
type: str
choices: [ status, result ]
notes:
- See http://cloudinit.readthedocs.io/ for more information about cloud-init.
'''
EXAMPLES = '''
- name: Gather all facts of cloud init
community.general.cloud_init_data_facts:
register: result
- ansible.builtin.debug:
var: result
- name: Wait for cloud init to finish
community.general.cloud_init_data_facts:
filter: status
register: res
until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
retries: 50
delay: 5
'''
RETURN = '''
---
cloud_init_data_facts:
description: Facts of result and status.
returned: success
type: dict
sample: '{
"status": {
"v1": {
"datasource": "DataSourceCloudStack",
"errors": []
}
},
"result": {
"v1": {
"datasource": "DataSourceCloudStack",
"init": {
"errors": [],
"finished": 1522066377.0185432,
"start": 1522066375.2648022
},
"init-local": {
"errors": [],
"finished": 1522066373.70919,
"start": 1522066373.4726632
},
"modules-config": {
"errors": [],
"finished": 1522066380.9097016,
"start": 1522066379.0011985
},
"modules-final": {
"errors": [],
"finished": 1522066383.56594,
"start": 1522066382.3449218
},
"stage": null
}
}
}'
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
CLOUD_INIT_PATH = "/var/lib/cloud/data"
def gather_cloud_init_data_facts(module):
res = {
'cloud_init_data_facts': dict()
}
for i in ['result', 'status']:
filter = module.params.get('filter')
if filter is None or filter == i:
res['cloud_init_data_facts'][i] = dict()
json_file = os.path.join(CLOUD_INIT_PATH, i + '.json')
if os.path.exists(json_file):
f = open(json_file, 'rb')
contents = to_text(f.read(), errors='surrogate_or_strict')
f.close()
if contents:
res['cloud_init_data_facts'][i] = module.from_json(contents)
return res
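# Illustrative sketch (not part of the module): the same per-file lookup that
# gather_cloud_init_data_facts() performs above, reduced to a standalone
# helper. The helper name is hypothetical, and json.loads stands in for
# module.from_json; path handling and decoding mirror the function above.
def example_read_cloud_init_json(name, path=CLOUD_INIT_PATH):
    """Return the parsed <name>.json from cloud-init's data directory, or {}."""
    import json
    json_file = os.path.join(path, name + '.json')
    if not os.path.exists(json_file):
        return {}
    with open(json_file, 'rb') as f:
        contents = to_text(f.read(), errors='surrogate_or_strict')
    return json.loads(contents) if contents else {}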
def main():
module = AnsibleModule(
argument_spec=dict(
filter=dict(choices=['result', 'status']),
),
supports_check_mode=True,
)
facts = gather_cloud_init_data_facts(module)
result = dict(changed=False, ansible_facts=facts, **facts)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,949 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cloudflare_dns
author:
- Michael Gruener (@mgruener)
short_description: Manage Cloudflare DNS records
description:
- "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)."
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
api_token:
description:
- API token.
- Required for api token authentication.
- "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
- Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
type: str
required: false
version_added: '0.2.0'
account_api_key:
description:
- Account API key.
- Required for api keys authentication.
- "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
type: str
required: false
aliases: [ account_api_token ]
account_email:
description:
- Account email. Required for API keys authentication.
type: str
required: false
algorithm:
description:
- Algorithm number.
- Required for O(type=DS) and O(type=SSHFP) when O(state=present).
type: int
cert_usage:
description:
- Certificate usage number.
- Required for O(type=TLSA) when O(state=present).
type: int
choices: [ 0, 1, 2, 3 ]
flag:
description:
- Issuer Critical Flag.
- Required for O(type=CAA) when O(state=present).
type: int
choices: [ 0, 1 ]
version_added: 8.0.0
tag:
description:
- CAA issue restriction.
- Required for O(type=CAA) when O(state=present).
type: str
choices: [ issue, issuewild, iodef ]
version_added: 8.0.0
hash_type:
description:
- Hash type number.
- Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present).
type: int
choices: [ 1, 2 ]
key_tag:
description:
- DNSSEC key tag.
- Needed for O(type=DS) when O(state=present).
type: int
port:
description:
- Service port.
- Required for O(type=SRV) and O(type=TLSA).
type: int
priority:
description:
- Record priority.
- Required for O(type=MX) and O(type=SRV).
default: 1
type: int
proto:
description:
- Service protocol. Required for O(type=SRV) and O(type=TLSA).
- Common values are TCP and UDP.
type: str
proxied:
description:
- Proxy through Cloudflare network or just use DNS.
type: bool
default: false
record:
description:
- Record to add.
- Required if O(state=present).
- Default is V(@) (that is, the zone name).
type: str
default: '@'
aliases: [ name ]
selector:
description:
- Selector number.
- Required for O(type=TLSA) when O(state=present).
choices: [ 0, 1 ]
type: int
service:
description:
- Record service.
- Required for O(type=SRV).
type: str
solo:
description:
- Whether the record should be the only one for that record type and record name.
- Only use with O(state=present).
- This will delete all other records with the same record name and type.
type: bool
state:
description:
- Whether the record(s) should exist or not.
type: str
choices: [ absent, present ]
default: present
timeout:
description:
- Timeout for Cloudflare API calls.
type: int
default: 30
ttl:
description:
- The TTL to give the new record.
- Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
type: int
default: 1
type:
description:
- The type of DNS record to create. Required if O(state=present).
- Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by CloudFlare.
type: str
choices: [ A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT ]
value:
description:
- The record value.
- Required for O(state=present).
type: str
aliases: [ content ]
weight:
description:
- Service weight.
- Required for O(type=SRV).
type: int
default: 1
zone:
description:
- The name of the Zone to work with (e.g. "example.com").
- The Zone must already exist.
type: str
required: true
aliases: [ domain ]
'''
EXAMPLES = r'''
- name: Create a test.example.net A record to point to 127.0.0.1
community.general.cloudflare_dns:
zone: example.net
record: test
type: A
value: 127.0.0.1
account_email: test@example.com
account_api_key: dummyapitoken
register: record
- name: Create a record using api token
community.general.cloudflare_dns:
zone: example.net
record: test
type: A
value: 127.0.0.1
api_token: dummyapitoken
- name: Create a example.net CNAME record to example.com
community.general.cloudflare_dns:
zone: example.net
type: CNAME
value: example.com
account_email: test@example.com
account_api_key: dummyapitoken
state: present
- name: Change its TTL
community.general.cloudflare_dns:
zone: example.net
type: CNAME
value: example.com
ttl: 600
account_email: test@example.com
account_api_key: dummyapitoken
state: present
- name: Delete the record
community.general.cloudflare_dns:
zone: example.net
type: CNAME
value: example.com
account_email: test@example.com
account_api_key: dummyapitoken
state: absent
- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network
community.general.cloudflare_dns:
zone: example.net
type: CNAME
value: example.com
proxied: true
account_email: test@example.com
account_api_key: dummyapitoken
state: present
# This deletes all other TXT records named "test.example.net"
- name: Create TXT record "test.example.net" with value "unique value"
community.general.cloudflare_dns:
domain: example.net
record: test
type: TXT
value: unique value
solo: true
account_email: test@example.com
account_api_key: dummyapitoken
state: present
- name: Create an SRV record _foo._tcp.example.net
community.general.cloudflare_dns:
domain: example.net
service: foo
proto: tcp
port: 3500
priority: 10
weight: 20
type: SRV
value: fooserver.example.net
- name: Create a SSHFP record login.example.com
community.general.cloudflare_dns:
zone: example.com
record: login
type: SSHFP
algorithm: 4
hash_type: 2
value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
- name: Create a TLSA record _25._tcp.mail.example.com
community.general.cloudflare_dns:
zone: example.com
record: mail
port: 25
proto: tcp
type: TLSA
cert_usage: 3
selector: 1
hash_type: 1
value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
- name: Create a CAA record subdomain.example.com
community.general.cloudflare_dns:
zone: example.com
record: subdomain
type: CAA
flag: 0
tag: issue
value: ca.example.com
- name: Create a DS record for subdomain.example.com
community.general.cloudflare_dns:
zone: example.com
record: subdomain
type: DS
key_tag: 5464
algorithm: 8
hash_type: 2
value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
'''
RETURN = r'''
record:
description: A dictionary containing the record data.
returned: success, except on record deletion
type: complex
contains:
content:
description: The record content (details depend on record type).
returned: success
type: str
sample: 192.0.2.91
created_on:
description: The record creation date.
returned: success
type: str
sample: "2016-03-25T19:09:42.516553Z"
data:
description: Additional record data.
returned: success, if type is SRV, DS, SSHFP, TLSA or CAA
type: dict
sample: {
name: "jabber",
port: 8080,
priority: 10,
proto: "_tcp",
service: "_xmpp",
target: "jabberhost.sample.com",
weight: 5,
}
id:
description: The record ID.
returned: success
type: str
sample: f9efb0549e96abcb750de63b38c9576e
locked:
description: No documentation available.
returned: success
type: bool
sample: false
meta:
description: No documentation available.
returned: success
type: dict
sample: { auto_added: false }
modified_on:
description: Record modification date.
returned: success
type: str
sample: "2016-03-25T19:09:42.516553Z"
name:
description: The record name as FQDN (including _service and _proto for SRV).
returned: success
type: str
sample: www.sample.com
priority:
description: Priority of the MX record.
returned: success, if type is MX
type: int
sample: 10
proxiable:
description: Whether this record can be proxied through Cloudflare.
returned: success
type: bool
sample: false
proxied:
description: Whether the record is proxied through Cloudflare.
returned: success
type: bool
sample: false
ttl:
description: The time-to-live for the record.
returned: success
type: int
sample: 300
type:
description: The record type.
returned: success
type: str
sample: A
zone_id:
description: The ID of the zone containing the record.
returned: success
type: str
sample: abcede0bf9f0066f94029d2e6b73856a
zone_name:
description: The name of the zone containing the record.
returned: success
type: str
sample: sample.com
'''
import json
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.urls import fetch_url
def lowercase_string(param):
if not isinstance(param, str):
return param
return param.lower()
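# Illustration (added for clarity, not in the original source):
# lowercase_string('Example.COM') returns 'example.com', while non-string
# values such as None or 3500 are returned unchanged.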
class CloudflareAPI(object):
cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
changed = False
def __init__(self, module):
self.module = module
self.api_token = module.params['api_token']
self.account_api_key = module.params['account_api_key']
self.account_email = module.params['account_email']
self.algorithm = module.params['algorithm']
self.cert_usage = module.params['cert_usage']
self.hash_type = module.params['hash_type']
self.flag = module.params['flag']
self.tag = module.params['tag']
self.key_tag = module.params['key_tag']
self.port = module.params['port']
self.priority = module.params['priority']
self.proto = lowercase_string(module.params['proto'])
self.proxied = module.params['proxied']
self.selector = module.params['selector']
self.record = lowercase_string(module.params['record'])
self.service = lowercase_string(module.params['service'])
self.is_solo = module.params['solo']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.ttl = module.params['ttl']
self.type = module.params['type']
self.value = module.params['value']
self.weight = module.params['weight']
self.zone = lowercase_string(module.params['zone'])
if self.record == '@':
self.record = self.zone
if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
self.value = self.value.rstrip('.').lower()
if (self.type == 'AAAA') and (self.value is not None):
self.value = self.value.lower()
if (self.type == 'SRV'):
if (self.proto is not None) and (not self.proto.startswith('_')):
self.proto = '_' + self.proto
if (self.service is not None) and (not self.service.startswith('_')):
self.service = '_' + self.service
if (self.type == 'TLSA'):
if (self.proto is not None) and (not self.proto.startswith('_')):
self.proto = '_' + self.proto
if (self.port is not None):
self.port = '_' + str(self.port)
if self.record != self.zone and not self.record.endswith('.' + self.zone):
    # compare against '.' + zone so that e.g. record 'myexample.com' is not
    # mistaken for a name already inside zone 'example.com'
    self.record = self.record + '.' + self.zone
if (self.type == 'DS'):
if self.record == self.zone:
self.module.fail_json(msg="DS records only apply to subdomains.")
def _cf_simple_api_call(self, api_call, method='GET', payload=None):
if self.api_token:
headers = {
'Authorization': 'Bearer ' + self.api_token,
'Content-Type': 'application/json',
}
else:
headers = {
'X-Auth-Email': self.account_email,
'X-Auth-Key': self.account_api_key,
'Content-Type': 'application/json',
}
data = None
if payload:
try:
data = json.dumps(payload)
except Exception as e:
self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
resp, info = fetch_url(self.module,
self.cf_api_endpoint + api_call,
headers=headers,
data=data,
method=method,
timeout=self.timeout)
if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
error_msg = ''
if info['status'] == 401:
# Unauthorized
error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 403:
# Forbidden
error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 429:
# Too many requests
error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 405:
# Method not allowed
error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 415:
# Unsupported Media Type
error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 400:
# Bad Request
error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
result = None
try:
content = resp.read()
except AttributeError:
if info['body']:
content = info['body']
else:
error_msg += "; The API response was empty"
if content:
try:
result = json.loads(to_text(content, errors='surrogate_or_strict'))
except (getattr(json, 'JSONDecodeError', ValueError)) as e:
error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
# Without a valid/parsed JSON response no more error processing can be done
if result is None:
self.module.fail_json(msg=error_msg)
if 'success' not in result:
error_msg += "; Unexpected error details: {0}".format(result.get('error'))
self.module.fail_json(msg=error_msg)
if not result['success']:
error_msg += "; Error details: "
for error in result['errors']:
error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
if 'error_chain' in error:
for chain_error in error['error_chain']:
error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
self.module.fail_json(msg=error_msg)
return result, info['status']
def _cf_api_call(self, api_call, method='GET', payload=None):
result, status = self._cf_simple_api_call(api_call, method, payload)
data = result['result']
if 'result_info' in result:
    pagination = result['result_info']
    if pagination['total_pages'] > 1:
        next_page = int(pagination['page']) + 1
        # strip the "page" parameter from the call parameters (if there are any)
        if '?' in api_call:
            raw_api_call, query = api_call.split('?', 1)
            parameters = [param for param in query.split('&') if not param.startswith('page')]
        else:
            raw_api_call = api_call
            parameters = []
        while next_page <= pagination['total_pages']:
            # rebuild the URL for every page so the page number actually advances
            page_call = raw_api_call + '?' + '&'.join(['page={0}'.format(next_page)] + parameters)
            result, status = self._cf_simple_api_call(page_call, method, payload)
            data += result['result']
            next_page += 1
return data, status
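# Example of the pagination above (comment added for clarity): a first call to
# '/zones?name=example.com' whose result_info reports 3 pages is followed by
# '/zones?page=2&name=example.com' and '/zones?page=3&name=example.com', with
# all 'result' lists concatenated into the returned data.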
def _get_zone_id(self, zone=None):
if not zone:
zone = self.zone
zones = self.get_zones(zone)
if len(zones) > 1:
self.module.fail_json(msg="More than one zone matches {0}".format(zone))
if len(zones) < 1:
self.module.fail_json(msg="No zone found with name {0}".format(zone))
return zones[0]['id']
def get_zones(self, name=None):
if not name:
name = self.zone
param = ''
if name:
param = '?' + urlencode({'name': name})
zones, status = self._cf_api_call('/zones' + param)
return zones
def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
if not zone_name:
zone_name = self.zone
if not type:
type = self.type
if not record:
record = self.record
# the default value '' means "fall back to the module's value", while an
# explicit None means "do not filter on content at all"
if (not value) and (value is not None):
value = self.value
zone_id = self._get_zone_id()
api_call = '/zones/{0}/dns_records'.format(zone_id)
query = {}
if type:
query['type'] = type
if record:
query['name'] = record
if value:
query['content'] = value
if query:
api_call += '?' + urlencode(query)
records, status = self._cf_api_call(api_call)
return records
def delete_dns_records(self, **kwargs):
params = {}
for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
if param in kwargs:
params[param] = kwargs[param]
else:
params[param] = getattr(self, param)
records = []
content = params['value']
search_record = params['record']
if params['type'] == 'SRV':
if not (params['value'] is None or params['value'] == ''):
content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
elif params['type'] == 'DS':
if not (params['value'] is None or params['value'] == ''):
content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
elif params['type'] == 'SSHFP':
if not (params['value'] is None or params['value'] == ''):
content = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value'].upper()
elif params['type'] == 'TLSA':
if not (params['value'] is None or params['value'] == ''):
content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
if params['solo']:
search_value = None
else:
search_value = content
records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
for rr in records:
    # in solo mode, keep the one record that already matches exactly and delete the rest
    if params['solo'] and (rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content):
        continue
    self.changed = True
    if not self.module.check_mode:
        result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
return self.changed
def ensure_dns_record(self, **kwargs):
params = {}
for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
if param in kwargs:
params[param] = kwargs[param]
else:
params[param] = getattr(self, param)
search_value = params['value']
search_record = params['record']
new_record = None
if (params['type'] is None) or (params['record'] is None):
self.module.fail_json(msg="You must provide a type and a record to create a new record")
if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS']):
if not params['value']:
self.module.fail_json(msg="You must provide a non-empty value to create this record type")
# there can only be one CNAME per record
# ignoring the value when searching for existing
# CNAME records allows us to update the value if it
# changes
if params['type'] == 'CNAME':
search_value = None
new_record = {
"type": params['type'],
"name": params['record'],
"content": params['value'],
"ttl": params['ttl']
}
if (params['type'] in ['A', 'AAAA', 'CNAME']):
new_record["proxied"] = params["proxied"]
if params['type'] == 'MX':
for attr in [params['priority'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide priority and a value to create this record type")
new_record = {
"type": params['type'],
"name": params['record'],
"content": params['value'],
"priority": params['priority'],
"ttl": params['ttl']
}
if params['type'] == 'SRV':
for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
srv_data = {
"target": params['value'],
"port": params['port'],
"weight": params['weight'],
"priority": params['priority'],
"name": params['record'],
"proto": params['proto'],
"service": params['service']
}
new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
if params['type'] == 'DS':
for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
ds_data = {
"key_tag": params['key_tag'],
"algorithm": params['algorithm'],
"digest_type": params['hash_type'],
"digest": params['value'],
}
new_record = {
"type": params['type'],
"name": params['record'],
'data': ds_data,
"ttl": params['ttl'],
}
search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
if params['type'] == 'SSHFP':
for attr in [params['algorithm'], params['hash_type'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
sshfp_data = {
"fingerprint": params['value'].upper(),
"type": params['hash_type'],
"algorithm": params['algorithm'],
}
new_record = {
"type": params['type'],
"name": params['record'],
'data': sshfp_data,
"ttl": params['ttl'],
}
search_value = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value']
if params['type'] == 'TLSA':
for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
tlsa_data = {
"usage": params['cert_usage'],
"selector": params['selector'],
"matching_type": params['hash_type'],
"certificate": params['value'],
}
new_record = {
"type": params['type'],
"name": search_record,
'data': tlsa_data,
"ttl": params['ttl'],
}
search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
if params['type'] == 'CAA':
for attr in [params['flag'], params['tag'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide flag, tag and a value to create this record type")
caa_data = {
"flags": params['flag'],
"tag": params['tag'],
"value": params['value'],
}
new_record = {
"type": params['type'],
"name": params['record'],
'data': caa_data,
"ttl": params['ttl'],
}
search_value = None
zone_id = self._get_zone_id(params['zone'])
records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
# in theory this should be impossible as Cloudflare does not allow
# the creation of duplicate records, but let's cover it anyway
if len(records) > 1:
# as the Cloudflare API cannot filter records containing quotes,
# CAA records must be compared locally
if params['type'] == 'CAA':
for rr in records:
if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']:
return rr, self.changed
else:
self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
# record already exists, check if it must be updated
if len(records) == 1:
cur_record = records[0]
do_update = False
if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
do_update = True
if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
do_update = True
if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
do_update = True
if ('data' in new_record) and ('data' in cur_record):
if (cur_record['data'] != new_record['data']):
do_update = True
if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
do_update = True
if do_update:
if self.module.check_mode:
result = new_record
else:
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
self.changed = True
return result, self.changed
else:
return records, self.changed
if self.module.check_mode:
result = new_record
else:
result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
self.changed = True
return result, self.changed
def main():
module = AnsibleModule(
argument_spec=dict(
api_token=dict(
type="str",
required=False,
no_log=True,
fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
),
account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
account_email=dict(type='str', required=False),
algorithm=dict(type='int'),
cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
hash_type=dict(type='int', choices=[1, 2]),
key_tag=dict(type='int', no_log=False),
port=dict(type='int'),
flag=dict(type='int', choices=[0, 1]),
tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']),
priority=dict(type='int', default=1),
proto=dict(type='str'),
proxied=dict(type='bool', default=False),
record=dict(type='str', default='@', aliases=['name']),
selector=dict(type='int', choices=[0, 1]),
service=dict(type='str'),
solo=dict(type='bool'),
state=dict(type='str', default='present', choices=['absent', 'present']),
timeout=dict(type='int', default=30),
ttl=dict(type='int', default=1),
type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
value=dict(type='str', aliases=['content']),
weight=dict(type='int', default=1),
zone=dict(type='str', required=True, aliases=['domain']),
),
supports_check_mode=True,
required_if=[
('state', 'present', ['record', 'type', 'value']),
('state', 'absent', ['record']),
('type', 'SRV', ['proto', 'service']),
('type', 'TLSA', ['proto', 'port']),
('type', 'CAA', ['flag', 'tag']),
],
)
if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
if module.params['type'] == 'SRV':
if not ((module.params['weight'] is not None and module.params['port'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
or (module.params['weight'] is None and module.params['port'] is None
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
if module.params['type'] == 'SSHFP':
if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
or (module.params['algorithm'] is None and module.params['hash_type'] is None
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
if module.params['type'] == 'TLSA':
if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
if module.params['type'] == 'CAA':
if not ((module.params['flag'] is not None and module.params['tag'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
or (module.params['flag'] is None and module.params['tag'] is None
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.")
if module.params['type'] == 'DS':
if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
changed = False
cf_api = CloudflareAPI(module)
# sanity checks
if cf_api.is_solo and cf_api.state == 'absent':
module.fail_json(msg="solo=true can only be used with state=present")
# perform add, delete or update (TTL, priority, proxied status, data and
# CNAME content can all be updated) of one or more records
if cf_api.state == 'present':
# delete all records matching record name + type
if cf_api.is_solo:
changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
result, changed = cf_api.ensure_dns_record()
if isinstance(result, list):
module.exit_json(changed=changed, result={'record': result[0]})
module.exit_json(changed=changed, result={'record': result})
else:
# force solo to False, just to be sure
changed = cf_api.delete_dns_records(solo=False)
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,153 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cobbler_sync
short_description: Sync Cobbler
description:
- Sync Cobbler to commit changes.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
host:
description:
- The name or IP address of the Cobbler system.
default: 127.0.0.1
type: str
port:
description:
- Port number to be used for REST connection.
- The default value depends on parameter O(use_ssl).
type: int
username:
description:
- The username to log in to Cobbler.
default: cobbler
type: str
password:
description:
- The password to log in to Cobbler.
type: str
use_ssl:
description:
- If V(false), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- If V(false), SSL certificates will not be validated.
- This should only be set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
author:
- Dag Wieers (@dagwieers)
todo:
notes:
- Concurrently syncing Cobbler is bound to fail with weird errors.
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''
EXAMPLES = r'''
- name: Commit Cobbler changes
community.general.cobbler_sync:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
run_once: true
delegate_to: localhost
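# A hedged variant (not in the original examples) for a lab Cobbler server
# with a self-signed certificate, relying on the documented use_ssl and
# validate_certs options.
- name: Commit Cobbler changes on a host with a self-signed certificate
  community.general.cobbler_sync:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
    validate_certs: false
  run_once: true
  delegate_to: localhost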
'''
RETURN = r'''
# Default return values
'''
import ssl
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils.common.text.converters import to_text
from ansible_collections.community.general.plugins.module_utils.datetime import (
now,
)
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', default='127.0.0.1'),
port=dict(type='int'),
username=dict(type='str', default='cobbler'),
password=dict(type='str', no_log=True),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
username = module.params['username']
password = module.params['password']
port = module.params['port']
use_ssl = module.params['use_ssl']
validate_certs = module.params['validate_certs']
module.params['proto'] = 'https' if use_ssl else 'http'
if not port:
module.params['port'] = '443' if use_ssl else '80'
result = dict(
changed=True,
)
start = now()
ssl_context = None
if not validate_certs:
try:
ssl_context = ssl._create_unverified_context()
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = ssl._create_unverified_context
url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
if ssl_context:
conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
else:
conn = xmlrpc_client.Server(url)
try:
token = conn.login(username, password)
except xmlrpc_client.Fault as e:
module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
except Exception as e:
module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
if not module.check_mode:
try:
conn.sync(token)
except Exception as e:
module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
elapsed = now() - start
module.exit_json(elapsed=elapsed.seconds, **result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,351 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cobbler_system
short_description: Manage system objects in Cobbler
description:
- Add, modify or remove systems in Cobbler.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
options:
host:
description:
- The name or IP address of the Cobbler system.
default: 127.0.0.1
type: str
port:
description:
- Port number to be used for REST connection.
- The default value depends on parameter O(use_ssl).
type: int
username:
description:
- The username to log in to Cobbler.
default: cobbler
type: str
password:
description:
- The password to log in to Cobbler.
type: str
use_ssl:
description:
- If V(false), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- If V(false), SSL certificates will not be validated.
- This should only be set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
name:
description:
- The system name to manage.
type: str
properties:
description:
- A dictionary with system properties.
type: dict
interfaces:
description:
- A dictionary of dictionaries containing interface options, keyed by interface name (for example V(eth0)).
type: dict
sync:
description:
- Sync on changes.
- Concurrently syncing Cobbler is bound to fail.
type: bool
default: false
state:
description:
- Whether the system should be present or absent, or whether a query should be made.
choices: [ absent, present, query ]
default: present
type: str
author:
- Dag Wieers (@dagwieers)
notes:
- Concurrently syncing Cobbler is bound to fail with weird errors.
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''
EXAMPLES = r'''
- name: Ensure the system exists in Cobbler
community.general.cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: myhost
properties:
profile: CentOS6-x86_64
name_servers: [ 2.3.4.5, 3.4.5.6 ]
name_servers_search: foo.com, bar.com
interfaces:
eth0:
macaddress: 00:01:02:03:04:05
ipaddress: 1.2.3.4
delegate_to: localhost
- name: Enable network boot in Cobbler
community.general.cobbler_system:
host: bdsol-aci-cobbler-01
username: cobbler
password: ins3965!
name: bdsol-aci51-apic1.cisco.com
properties:
netboot_enabled: true
state: present
delegate_to: localhost
- name: Query all systems in Cobbler
community.general.cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
state: query
register: cobbler_systems
delegate_to: localhost
- name: Query a specific system in Cobbler
community.general.cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: '{{ inventory_hostname }}'
state: query
register: cobbler_properties
delegate_to: localhost
- name: Ensure the system does not exist in Cobbler
community.general.cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: myhost
state: absent
delegate_to: localhost
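# A hedged sketch (not in the original examples) combining a property change
# with the documented sync option so the change is committed immediately.
- name: Change the profile and sync Cobbler afterwards
  community.general.cobbler_system:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
    name: myhost
    properties:
      profile: CentOS7-x86_64
    sync: true
    state: present
  delegate_to: localhost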
'''
RETURN = r'''
systems:
description: List of systems
returned: O(state=query) and O(name) is not provided
type: list
system:
description: (Resulting) information about the system we are working with
returned: when O(name) is provided
type: dict
'''
import ssl
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils.common.text.converters import to_text
from ansible_collections.community.general.plugins.module_utils.datetime import (
now,
)
IFPROPS_MAPPING = dict(
bondingopts='bonding_opts',
bridgeopts='bridge_opts',
connected_mode='connected_mode',
cnames='cnames',
dhcptag='dhcp_tag',
dnsname='dns_name',
ifgateway='if_gateway',
interfacetype='interface_type',
interfacemaster='interface_master',
ipaddress='ip_address',
ipv6address='ipv6_address',
ipv6defaultgateway='ipv6_default_gateway',
ipv6mtu='ipv6_mtu',
ipv6prefix='ipv6_prefix',
ipv6secondaries='ipv6_secondaries',
ipv6staticroutes='ipv6_static_routes',
macaddress='mac_address',
management='management',
mtu='mtu',
netmask='netmask',
static='static',
staticroutes='static_routes',
virtbridge='virt_bridge',
)
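# How this mapping is used (comment added for clarity): the module-side key on
# the left is translated to Cobbler's interface field on the right, and each
# changed value is sent to 'modify_interface' under a key of the form
# '<key>-<device>', for example 'macaddress-eth0'.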
def getsystem(conn, name, token):
system = dict()
if name:
# system = conn.get_system(name, token)
systems = conn.find_system(dict(name=name), token)
if systems:
system = systems[0]
return system
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', default='127.0.0.1'),
port=dict(type='int'),
username=dict(type='str', default='cobbler'),
password=dict(type='str', no_log=True),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
name=dict(type='str'),
interfaces=dict(type='dict'),
properties=dict(type='dict'),
sync=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
),
supports_check_mode=True,
)
username = module.params['username']
password = module.params['password']
port = module.params['port']
use_ssl = module.params['use_ssl']
validate_certs = module.params['validate_certs']
name = module.params['name']
state = module.params['state']
module.params['proto'] = 'https' if use_ssl else 'http'
if not port:
module.params['port'] = '443' if use_ssl else '80'
result = dict(
changed=False,
)
start = now()
ssl_context = None
if not validate_certs:
try:
ssl_context = ssl._create_unverified_context()
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = ssl._create_unverified_context
url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
if ssl_context:
conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
else:
conn = xmlrpc_client.Server(url)
try:
token = conn.login(username, password)
except xmlrpc_client.Fault as e:
module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
except Exception as e:
module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
system = getsystem(conn, name, token)
# result['system'] = system
if state == 'query':
if name:
result['system'] = system
else:
# Turn it into a dictionary of dictionaries
# all_systems = conn.get_systems()
# result['systems'] = { system['name']: system for system in all_systems }
# Return a list of dictionaries
result['systems'] = conn.get_systems()
elif state == 'present':
if system:
# Update existing entry
system_id = conn.get_system_handle(name, token)
if module.params['properties']:
    for key, value in iteritems(module.params['properties']):
        if key not in system:
            # skip unknown keys instead of indexing them below, which would raise a KeyError
            module.warn("Property '{0}' is not a valid system property.".format(key))
        elif system[key] != value:
try:
conn.modify_system(system_id, key, value, token)
result['changed'] = True
except Exception as e:
module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
else:
# Create a new entry
system_id = conn.new_system(token)
conn.modify_system(system_id, 'name', name, token)
result['changed'] = True
if module.params['properties']:
for key, value in iteritems(module.params['properties']):
try:
conn.modify_system(system_id, key, value, token)
except Exception as e:
module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
# Add interface properties
interface_properties = dict()
if module.params['interfaces']:
for device, values in iteritems(module.params['interfaces']):
for key, value in iteritems(values):
if key == 'name':
continue
if key not in IFPROPS_MAPPING:
    module.warn("Property '{0}' is not a valid interface property.".format(key))
    continue
if not system or system['interfaces'].get(device, {}).get(IFPROPS_MAPPING[key]) != value:
result['changed'] = True
interface_properties['{0}-{1}'.format(key, device)] = value
if interface_properties:
    conn.modify_system(system_id, "modify_interface", interface_properties, token)
# Only save when the entry was changed
if not module.check_mode and result['changed']:
conn.save_system(system_id, token)
elif state == 'absent':
if system:
if not module.check_mode:
conn.remove_system(name, token)
result['changed'] = True
if not module.check_mode and module.params['sync'] and result['changed']:
try:
conn.sync(token)
except Exception as e:
module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
if state in ('absent', 'present'):
result['system'] = getsystem(conn, name, token)
if module._diff:
result['diff'] = dict(before=system, after=result['system'])
elapsed = now() - start
module.exit_json(elapsed=elapsed.seconds, **result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,276 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: composer
author:
- "Dimitrios Tydeas Mengidis (@dmtrs)"
- "René Moser (@resmo)"
short_description: Dependency Manager for PHP
description:
- >
Composer is a tool for dependency management in PHP. It allows you to
declare the dependent libraries your project needs and it will install
them in your project for you.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
command:
type: str
description:
- Composer command like "install", "update" and so on.
default: install
arguments:
type: str
description:
- Composer arguments like required package, version and so on.
default: ''
executable:
type: path
description:
- Path to PHP Executable on the remote host, if PHP is not in PATH.
aliases: [ php_path ]
working_dir:
type: path
description:
- Directory of your project (see --working-dir). This is required when
the command is not run globally.
- Will be ignored if O(global_command=true).
global_command:
description:
- Runs the specified command globally.
type: bool
default: false
prefer_source:
description:
- Forces installation from package sources when possible (see --prefer-source).
default: false
type: bool
prefer_dist:
description:
- Forces installation from package dist even for dev versions (see --prefer-dist).
default: false
type: bool
no_dev:
description:
- Disables installation of require-dev packages (see --no-dev).
default: true
type: bool
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
default: false
type: bool
no_plugins:
description:
- Disables all plugins (see --no-plugins).
default: false
type: bool
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
- Recommended especially for production, but can take a bit of time to run.
default: true
type: bool
classmap_authoritative:
description:
- Autoload classes from classmap only.
- Implicitly enables O(optimize_autoloader).
- Recommended especially for production, but can take a bit of time to run.
default: false
type: bool
apcu_autoloader:
description:
- Uses APCu to cache found/not-found classes.
default: false
type: bool
ignore_platform_reqs:
description:
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
default: false
type: bool
composer_executable:
type: path
description:
- Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed.
version_added: 3.2.0
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin) or specified in O(composer_executable)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
'''
EXAMPLES = '''
- name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock
community.general.composer:
command: install
working_dir: /path/to/project
- name: Install a new package
community.general.composer:
command: require
arguments: my/package
working_dir: /path/to/project
- name: Clone and install a project with all dependencies
community.general.composer:
command: create-project
arguments: package/package /path/to/project ~1.0
working_dir: /path/to/project
prefer_dist: true
- name: Install a package globally
community.general.composer:
command: require
global_command: true
arguments: my/package
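# A hedged sketch (not in the original examples) using documented options to
# update dependencies while ignoring platform requirements, for instance in a
# build container whose PHP extensions differ from production.
- name: Update project dependencies, ignoring platform requirements
  community.general.composer:
    command: update
    working_dir: /path/to/project
    ignore_platform_reqs: true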
'''
import re
from ansible.module_utils.basic import AnsibleModule
def parse_out(string):
return re.sub(r"\s+", " ", string).strip()
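# Illustration (added for clarity): parse_out("  Loading composer\n  repositories ")
# returns 'Loading composer repositories'.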
def has_changed(string):
for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
if no_change in string:
return False
return True
def get_available_options(module, command='install'):
# get all available options from a composer command using composer help to json
rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json")
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
command_help_json = module.from_json(out)
return command_help_json['definition']['options']
def composer_command(module, command, arguments="", options=None):
if options is None:
options = []
global_command = module.params['global_command']
if not global_command:
options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
if module.params['executable'] is None:
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
else:
php_path = module.params['executable']
if module.params['composer_executable'] is None:
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
else:
composer_path = module.params['composer_executable']
cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
return module.run_command(cmd)
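# Rough shape of the assembled command (comment added for clarity; the binary
# paths come from get_bin_path and are assumptions here): a default install run
# with working_dir=/srv/app produces something like
#   php composer install --no-ansi --no-interaction --no-progress --no-dev --optimize-autoloader --working-dir '/srv/app'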
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(default="install", type="str"),
arguments=dict(default="", type="str"),
executable=dict(type="path", aliases=["php_path"]),
working_dir=dict(type="path"),
global_command=dict(default=False, type="bool"),
prefer_source=dict(default=False, type="bool"),
prefer_dist=dict(default=False, type="bool"),
no_dev=dict(default=True, type="bool"),
no_scripts=dict(default=False, type="bool"),
no_plugins=dict(default=False, type="bool"),
apcu_autoloader=dict(default=False, type="bool"),
optimize_autoloader=dict(default=True, type="bool"),
classmap_authoritative=dict(default=False, type="bool"),
ignore_platform_reqs=dict(default=False, type="bool"),
composer_executable=dict(type="path"),
),
required_if=[('global_command', False, ['working_dir'])],
supports_check_mode=True
)
# Get composer command with fallback to default
command = module.params['command']
if re.search(r"\s", command):
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
available_options = get_available_options(module=module, command=command)
options = []
# Default options
default_options = [
'no-ansi',
'no-interaction',
'no-progress',
]
for option in default_options:
if option in available_options:
option = "--%s" % option
options.append(option)
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
'no_dev': 'no-dev',
'no_scripts': 'no-scripts',
'no_plugins': 'no-plugins',
'apcu_autoloader': 'apcu-autoloader',
'optimize_autoloader': 'optimize-autoloader',
'classmap_authoritative': 'classmap-authoritative',
'ignore_platform_reqs': 'ignore-platform-reqs',
}
for param, option in option_params.items():
if module.params.get(param) and option in available_options:
option = "--%s" % option
options.append(option)
if module.check_mode:
if 'dry-run' in available_options:
options.append('--dry-run')
else:
module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
rc, out, err = composer_command(module, command, arguments, options)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output, stdout=err)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,640 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: consul
short_description: Add, modify & delete services within a consul cluster
description:
- Registers services and checks for an agent with a consul cluster.
A service is some process running on the agent node that should be advertised by
consul's discovery mechanism. It may optionally supply a check definition,
a periodic service test to notify the consul cluster of service's health.
- "Checks may also be registered per node e.g. disk usage, or cpu usage and
notify the health of the entire node to the cluster.
Service level checks do not require a check name or id as these are derived
by Consul from the Service name and id respectively by appending 'service:'
Node level checks require a O(check_name) and optionally a O(check_id)."
- Currently, there is no complete way to retrieve the script, interval or TTL
metadata for a registered check. Without this metadata it is not possible to
tell if the data supplied with ansible represents a change to a check. As a
result this module does not attempt to determine changes and will always report
that a change occurred. An API method is planned to supply this metadata, and at
that stage change management will be added.
- "See U(http://consul.io) for more details."
requirements:
- python-consul
- requests
author: "Steve Gargan (@sgargan)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
state:
type: str
description:
- Register or deregister the consul service, defaults to present.
default: present
choices: ['present', 'absent']
service_name:
type: str
description:
- Unique name for the service on a node; required when registering
a service. May be omitted when registering a node level check.
service_id:
type: str
description:
- The ID for the service, must be unique per node. If O(state=absent),
defaults to the service name if supplied.
host:
type: str
description:
- Host of the consul agent, defaults to V(localhost).
default: localhost
port:
type: int
description:
- The port on which the consul agent is running.
default: 8500
scheme:
type: str
description:
- The protocol scheme on which the consul agent is running.
default: http
validate_certs:
description:
- Whether to verify the TLS certificate of the consul agent.
type: bool
default: true
notes:
type: str
description:
- Notes to attach to check when registering it.
service_port:
type: int
description:
- The port on which the service is listening. Can optionally be supplied for
registration of a service, that is if O(service_name) or O(service_id) is set.
service_address:
type: str
description:
- The address to advertise that the service will be listening on.
This value will be passed as the C(address) parameter to Consul's
C(/v1/agent/service/register) API method, so refer to the Consul API
documentation for further details.
tags:
type: list
elements: str
description:
- Tags that will be attached to the service registration.
script:
type: str
description:
- The script/command that will be run periodically to check the health of the service.
- Requires O(interval) to be provided.
- Mutually exclusive with O(ttl), O(tcp) and O(http).
interval:
type: str
description:
- The interval at which the service check will be run.
This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m).
If no suffix is supplied, V(s) will be used by default, for example V(10) will be V(10s).
- Required if one of the parameters O(script), O(http), or O(tcp) is specified.
check_id:
type: str
description:
- An ID for the service check. If O(state=absent), defaults to
O(check_name). Ignored if part of a service definition.
check_name:
type: str
description:
- Name for the service check. Required if standalone, ignored if
part of service definition.
check_node:
description:
- Node name.
# TODO: properly document!
type: str
check_host:
description:
- Host name.
# TODO: properly document!
type: str
ttl:
type: str
description:
- Checks can be registered with a TTL instead of a O(script) and O(interval).
This means that the service will check in with the agent before the
TTL expires; if it does not, the check will be considered failed.
Required if registering a check when O(script) and O(interval) are missing.
Similar to the interval, this is a number with a V(s) or V(m) suffix to
signify the units of seconds or minutes, for example V(15s) or V(1m).
If no suffix is supplied, V(s) will be used by default, for example V(10) will be V(10s).
- Mutually exclusive with O(script), O(tcp) and O(http).
tcp:
type: str
description:
- Checks can be registered with a TCP port. This means that consul
will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
The format is V(host:port), for example V(localhost:80).
- Requires O(interval) to be provided.
- Mutually exclusive with O(script), O(ttl) and O(http).
version_added: '1.3.0'
http:
type: str
description:
- Checks can be registered with an HTTP endpoint. This means that consul
will check that the http endpoint returns a successful HTTP status.
- Requires O(interval) to be provided.
- Mutually exclusive with O(script), O(ttl) and O(tcp).
timeout:
type: str
description:
- A custom HTTP check timeout. The consul default is 10 seconds.
Similar to the interval, this is a number with a V(s) or V(m) suffix to
signify the units of seconds or minutes, for example V(15s) or V(1m).
If no suffix is supplied, V(s) will be used by default, for example V(10) will be V(10s).
token:
type: str
description:
- The token key identifying an ACL rule set. May be required to register services.
ack_params_state_absent:
type: bool
description:
- This parameter no longer has any effect and is deprecated. It will be removed in community.general 10.0.0.
'''
EXAMPLES = '''
- name: Register nginx service with the local consul agent
community.general.consul:
service_name: nginx
service_port: 80
- name: Register nginx service with curl check
community.general.consul:
service_name: nginx
service_port: 80
script: curl http://localhost
interval: 60s
- name: Register nginx with a TCP check
community.general.consul:
service_name: nginx
service_port: 80
interval: 60s
tcp: localhost:80
- name: Register nginx with an http check
community.general.consul:
service_name: nginx
service_port: 80
interval: 60s
http: http://localhost:80/status
- name: Register external service nginx available at 10.1.5.23
community.general.consul:
service_name: nginx
service_port: 80
service_address: 10.1.5.23
- name: Register nginx with some service tags
community.general.consul:
service_name: nginx
service_port: 80
tags:
- prod
- webservers
- name: Remove nginx service
community.general.consul:
service_name: nginx
state: absent
- name: Register celery worker service
community.general.consul:
service_name: celery-worker
tags:
- prod
- worker
- name: Create a node level check to test disk usage
community.general.consul:
check_name: Disk usage
check_id: disk_usage
script: /opt/disk_usage.py
interval: 5m
- name: Register an http check against a service that's already registered
community.general.consul:
check_name: nginx-check2
check_id: nginx-check2
service_id: nginx
interval: 60s
http: http://localhost:80/morestatus
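# A hedged sketch (not in the original examples): with the documented ttl
# option the service itself must report in to the agent before the TTL
# expires, otherwise the check is marked failed.
- name: Register a TTL based check for a batch job
  community.general.consul:
    check_name: nightly-batch
    check_id: nightly-batch
    ttl: 30m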
'''
try:
import consul
from requests.exceptions import ConnectionError
class PatchedConsulAgentService(consul.Consul.Agent.Service):
def deregister(self, service_id, token=None):
params = {}
if token:
params['token'] = token
return self.agent.http.put(consul.base.CB.bool(),
'/v1/agent/service/deregister/%s' % service_id,
params=params)
python_consul_installed = True
except ImportError:
python_consul_installed = False
import re
from ansible.module_utils.basic import AnsibleModule
def register_with_consul(module):
state = module.params['state']
if state == 'present':
add(module)
else:
remove(module)
def add(module):
''' adds a service or a check depending on supplied configuration'''
check = parse_check(module)
service = parse_service(module)
if not service and not check:
module.fail_json(msg='a name and port are required to register a service')
if service:
if check:
service.add_check(check)
add_service(module, service)
elif check:
add_check(module, check)
def remove(module):
''' removes a service or a check '''
service_id = module.params['service_id'] or module.params['service_name']
check_id = module.params['check_id'] or module.params['check_name']
if service_id:
remove_service(module, service_id)
else:
remove_check(module, check_id)
def add_check(module, check):
''' registers a check with the given agent. Currently there is no way to
retrieve the full metadata of an existing check through the consul api.
Without this we can't compare to the supplied check and so we must assume
a change. '''
if not check.name and not check.service_id:
module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
consul_api = get_consul_api(module)
check.register(consul_api)
module.exit_json(changed=True,
check_id=check.check_id,
check_name=check.name,
script=check.script,
interval=check.interval,
ttl=check.ttl,
tcp=check.tcp,
http=check.http,
timeout=check.timeout,
service_id=check.service_id)
def remove_check(module, check_id):
''' removes a check using its id '''
consul_api = get_consul_api(module)
if check_id in consul_api.agent.checks():
consul_api.agent.check.deregister(check_id)
module.exit_json(changed=True, id=check_id)
module.exit_json(changed=False, id=check_id)
def add_service(module, service):
''' registers a service with the current agent '''
result = service
changed = False
consul_api = get_consul_api(module)
existing = get_service_by_id_or_name(consul_api, service.id)
# there is no way to retrieve the details of checks so if a check is present
# in the service it must be re-registered
if service.has_checks() or not existing or not existing == service:
service.register(consul_api)
# check that it registered correctly
registered = get_service_by_id_or_name(consul_api, service.id)
if registered:
result = registered
changed = True
module.exit_json(changed=changed,
service_id=result.id,
service_name=result.name,
service_port=result.port,
checks=[check.to_dict() for check in service.checks()],
tags=result.tags)
def remove_service(module, service_id):
''' deregister a service from the given agent using its service id '''
consul_api = get_consul_api(module)
service = get_service_by_id_or_name(consul_api, service_id)
if service:
consul_api.agent.service.deregister(service_id, token=module.params['token'])
module.exit_json(changed=True, id=service_id)
module.exit_json(changed=False, id=service_id)
def get_consul_api(module):
consulClient = consul.Consul(host=module.params['host'],
port=module.params['port'],
scheme=module.params['scheme'],
verify=module.params['validate_certs'],
token=module.params['token'])
consulClient.agent.service = PatchedConsulAgentService(consulClient)
return consulClient
def get_service_by_id_or_name(consul_api, service_id_or_name):
''' iterate the registered services and find one with the given id '''
for dummy, service in consul_api.agent.services().items():
if service_id_or_name in (service['ID'], service['Service']):
return ConsulService(loaded=service)
def parse_check(module):
if module.params['check_id'] or any(module.params[p] is not None for p in ('script', 'ttl', 'tcp', 'http')):
return ConsulCheck(
module.params['check_id'],
module.params['check_name'],
module.params['check_node'],
module.params['check_host'],
module.params['script'],
module.params['interval'],
module.params['ttl'],
module.params['notes'],
module.params['tcp'],
module.params['http'],
module.params['timeout'],
module.params['service_id'],
)
def parse_service(module):
return ConsulService(
module.params['service_id'],
module.params['service_name'],
module.params['service_address'],
module.params['service_port'],
module.params['tags'],
)
class ConsulService(object):
def __init__(self, service_id=None, name=None, address=None, port=-1,
tags=None, loaded=None):
self.id = self.name = name
if service_id:
self.id = service_id
self.address = address
self.port = port
self.tags = tags
self._checks = []
if loaded:
self.id = loaded['ID']
self.name = loaded['Service']
self.port = loaded['Port']
self.tags = loaded['Tags']
def register(self, consul_api):
optional = {}
if self.port:
optional['port'] = self.port
if len(self._checks) > 0:
optional['check'] = self._checks[0].check
consul_api.agent.service.register(
self.name,
service_id=self.id,
address=self.address,
tags=self.tags,
**optional)
def add_check(self, check):
self._checks.append(check)
def checks(self):
return self._checks
def has_checks(self):
return len(self._checks) > 0
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.id == other.id and
self.name == other.name and
self.port == other.port and
self.tags == other.tags)
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
data = {'id': self.id, "name": self.name}
if self.port:
data['port'] = self.port
if self.tags and len(self.tags) > 0:
data['tags'] = self.tags
if len(self._checks) > 0:
data['check'] = self._checks[0].to_dict()
return data
class ConsulCheck(object):
def __init__(self, check_id, name, node=None, host='localhost',
script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
self.check_id = self.name = name
if check_id:
self.check_id = check_id
self.service_id = service_id
self.notes = notes
self.node = node
self.host = host
self.interval = self.validate_duration('interval', interval)
self.ttl = self.validate_duration('ttl', ttl)
self.script = script
self.tcp = tcp
self.http = http
self.timeout = self.validate_duration('timeout', timeout)
self.check = None
if script:
self.check = consul.Check.script(script, self.interval)
if ttl:
self.check = consul.Check.ttl(self.ttl)
if http:
self.check = consul.Check.http(http, self.interval, self.timeout)
if tcp:
regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
match = re.match(regex, tcp)
if not match:
raise Exception('tcp check must be in host:port format')
self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
def validate_duration(self, name, duration):
if duration:
duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
if not any(duration.endswith(suffix) for suffix in duration_units):
duration = "{0}s".format(duration)
return duration
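# Illustration (added for clarity): validate_duration('interval', '10')
# returns '10s', while values that already carry a unit, such as '1m' or
# '500ms', are returned unchanged.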
def register(self, consul_api):
consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
notes=self.notes,
check=self.check)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.check_id == other.check_id and
self.service_id == other.service_id and
self.name == other.name and
self.script == other.script and
self.interval == other.interval)
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
data = {}
self._add(data, 'id', attr='check_id')
self._add(data, 'name')  # the check's name lives on self.name; 'check_name' is not an attribute
self._add(data, 'script')
self._add(data, 'node')
self._add(data, 'notes')
self._add(data, 'host')
self._add(data, 'interval')
self._add(data, 'ttl')
self._add(data, 'tcp')
self._add(data, 'http')
self._add(data, 'timeout')
self._add(data, 'service_id')
return data
def _add(self, data, key, attr=None):
try:
if attr is None:
attr = key
data[key] = getattr(self, attr)
except Exception:
pass
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(default='localhost'),
port=dict(default=8500, type='int'),
scheme=dict(default='http'),
validate_certs=dict(default=True, type='bool'),
check_id=dict(),
check_name=dict(),
check_node=dict(),
check_host=dict(),
notes=dict(),
script=dict(),
service_id=dict(),
service_name=dict(),
service_address=dict(type='str'),
service_port=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
interval=dict(type='str'),
ttl=dict(type='str'),
tcp=dict(type='str'),
http=dict(type='str'),
timeout=dict(type='str'),
tags=dict(type='list', elements='str'),
token=dict(no_log=True),
ack_params_state_absent=dict(
type='bool',
removed_in_version='10.0.0',
removed_from_collection='community.general',
),
),
mutually_exclusive=[
('script', 'ttl', 'tcp', 'http'),
],
required_if=[
('state', 'present', ['service_name']),
('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True),
],
required_by={
'script': 'interval',
'http': 'interval',
'tcp': 'interval',
},
supports_check_mode=False,
)
p = module.params
test_dependencies(module)
if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']):
module.fail_json(
msg="The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is no longer allowed."
)
try:
register_with_consul(module)
except SystemExit:
raise
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (p['host'], p['port'], str(e)))
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
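# --- Illustrative sketch (not part of the module; hypothetical values). The
# service registration performed above maps onto python-consul roughly as:
#
#   import consul
#   c = consul.Consul(host='localhost', port=8500)
#   c.agent.service.register('nginx', service_id='nginx',
#                            address='10.1.5.23', tags=['prod'], port=80)
#   c.agent.service.deregister('nginx')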

View File

@ -0,0 +1,695 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: consul_acl
short_description: Manipulate Consul ACL keys and rules
description:
- Allows the addition, modification and deletion of ACL keys and associated
rules in a consul cluster via the agent. For more details on using and
configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
author:
- Steve Gargan (@sgargan)
- Colin Nolan (@colin-nolan)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
deprecated:
removed_in: 10.0.0
why: The legacy ACL system was removed from Consul.
alternative: Use M(community.general.consul_token) and/or M(community.general.consul_policy) instead.
options:
mgmt_token:
description:
- a management token is required to manipulate the acl lists
required: true
type: str
state:
description:
- whether the ACL pair should be present or absent
required: false
choices: ['present', 'absent']
default: present
type: str
token_type:
description:
- the type of token that should be created
choices: ['client', 'management']
default: client
type: str
name:
description:
- the name that should be associated with the ACL key; this is opaque
to Consul
required: false
type: str
token:
description:
- the token key identifying an ACL rule set. If generated by consul
this will be a UUID
required: false
type: str
rules:
type: list
elements: dict
description:
- rules that should be associated with a given token
required: false
host:
description:
- host of the consul agent; defaults to localhost
required: false
default: localhost
type: str
port:
type: int
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
type: str
validate_certs:
type: bool
description:
- whether to verify the tls certificate of the consul agent
required: false
default: true
requirements:
- python-consul
- pyhcl
- requests
'''
EXAMPLES = """
- name: Create an ACL with rules
community.general.consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
rules:
- key: "foo"
policy: read
- key: "private/foo"
policy: deny
- name: Create an ACL with a specific token
community.general.consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
token: my-token
rules:
- key: "foo"
policy: read
- name: Update the rules associated to an ACL token
community.general.consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
token: some_client_token
rules:
- event: "bbq"
policy: write
- key: "foo"
policy: read
- key: "private"
policy: deny
- keyring: write
- node: "hgs4"
policy: write
- operator: read
- query: ""
policy: write
- service: "consul"
policy: write
- session: "standup"
policy: write
- name: Remove a token
community.general.consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
state: absent
"""
RETURN = """
token:
description: the token associated to the ACL (the ACL's ID)
returned: success
type: str
sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
rules:
description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
returned: when O(state=present)
type: dict
sample: {
"key": {
"foo": {
"policy": "write"
},
"bar": {
"policy": "deny"
}
}
}
operation:
description: the operation performed on the ACL
returned: changed
type: str
sample: update
"""
try:
import consul
python_consul_installed = True
except ImportError:
python_consul_installed = False
try:
import hcl
pyhcl_installed = True
except ImportError:
pyhcl_installed = False
try:
from requests.exceptions import ConnectionError
has_requests = True
except ImportError:
has_requests = False
from collections import defaultdict
from ansible.module_utils.basic import to_text, AnsibleModule
RULE_SCOPES = [
"agent",
"agent_prefix",
"event",
"event_prefix",
"key",
"key_prefix",
"keyring",
"node",
"node_prefix",
"operator",
"query",
"query_prefix",
"service",
"service_prefix",
"session",
"session_prefix",
]
MANAGEMENT_PARAMETER_NAME = "mgmt_token"
HOST_PARAMETER_NAME = "host"
SCHEME_PARAMETER_NAME = "scheme"
VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
NAME_PARAMETER_NAME = "name"
PORT_PARAMETER_NAME = "port"
RULES_PARAMETER_NAME = "rules"
STATE_PARAMETER_NAME = "state"
TOKEN_PARAMETER_NAME = "token"
TOKEN_TYPE_PARAMETER_NAME = "token_type"
PRESENT_STATE_VALUE = "present"
ABSENT_STATE_VALUE = "absent"
CLIENT_TOKEN_TYPE_VALUE = "client"
MANAGEMENT_TOKEN_TYPE_VALUE = "management"
REMOVE_OPERATION = "remove"
UPDATE_OPERATION = "update"
CREATE_OPERATION = "create"
_POLICY_JSON_PROPERTY = "policy"
_RULES_JSON_PROPERTY = "Rules"
_TOKEN_JSON_PROPERTY = "ID"
_TOKEN_TYPE_JSON_PROPERTY = "Type"
_NAME_JSON_PROPERTY = "Name"
_POLICY_YML_PROPERTY = "policy"
_POLICY_HCL_PROPERTY = "policy"
_ARGUMENT_SPEC = {
MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
HOST_PARAMETER_NAME: dict(default='localhost'),
SCHEME_PARAMETER_NAME: dict(default='http'),
VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
NAME_PARAMETER_NAME: dict(),
PORT_PARAMETER_NAME: dict(default=8500, type='int'),
RULES_PARAMETER_NAME: dict(type='list', elements='dict'),
STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
TOKEN_PARAMETER_NAME: dict(no_log=False),
TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
default=CLIENT_TOKEN_TYPE_VALUE)
}
def set_acl(consul_client, configuration):
"""
Sets an ACL based on the given configuration.
:param consul_client: the consul client
:param configuration: the run configuration
:return: the output of setting the ACL
"""
acls_as_json = decode_acls_as_json(consul_client.acl.list())
existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
if None in existing_acls_mapped_by_token:
raise AssertionError("expecting ACL list to be associated to a token: %s" %
existing_acls_mapped_by_token[None])
if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
# No token but name given so can get token from name
configuration.token = existing_acls_mapped_by_name[configuration.name].token
if configuration.token and configuration.token in existing_acls_mapped_by_token:
return update_acl(consul_client, configuration)
else:
if configuration.token in existing_acls_mapped_by_token:
raise AssertionError()
if configuration.name in existing_acls_mapped_by_name:
raise AssertionError()
return create_acl(consul_client, configuration)
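# Decision flow, summarised: a configuration that names an existing ACL
# resolves to that ACL's token; a known token is updated in place; anything
# else falls through to create_acl.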
def update_acl(consul_client, configuration):
"""
Updates an ACL.
:param consul_client: the consul client
:param configuration: the run configuration
:return: the output of the update
"""
existing_acl = load_acl_with_token(consul_client, configuration.token)
changed = existing_acl.rules != configuration.rules
if changed:
name = configuration.name if configuration.name is not None else existing_acl.name
rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
updated_token = consul_client.acl.update(
configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
if updated_token != configuration.token:
raise AssertionError()
return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
def create_acl(consul_client, configuration):
"""
Creates an ACL.
:param consul_client: the consul client
:param configuration: the run configuration
:return: the output of the creation
"""
rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
token = consul_client.acl.create(
name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
rules = configuration.rules
return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
def remove_acl(consul, configuration):
"""
Removes an ACL.
:param consul: the consul client
:param configuration: the run configuration
:return: the output of the removal
"""
token = configuration.token
changed = consul.acl.info(token) is not None
if changed:
consul.acl.destroy(token)
return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
def load_acl_with_token(consul, token):
"""
Loads the ACL with the given token (token == rule ID).
:param consul: the consul client
:param token: the ACL "token"/ID (not name)
:return: the ACL associated to the given token
:exception ConsulACLTokenNotFoundException: raised if the given token does not exist
"""
acl_as_json = consul.acl.info(token)
if acl_as_json is None:
raise ConsulACLNotFoundException(token)
return decode_acl_as_json(acl_as_json)
def encode_rules_as_hcl_string(rules):
"""
Converts the given rules into the equivalent HCL (string) representation.
:param rules: the rules
:return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
note for justification)
"""
if len(rules) == 0:
# Note: an empty string is not valid HCL according to `hcl.loads`; however, the ACL `Rules` property will be an empty
# string if there are no rules...
return None
rules_as_hcl = ""
for rule in rules:
rules_as_hcl += encode_rule_as_hcl_string(rule)
return rules_as_hcl
def encode_rule_as_hcl_string(rule):
"""
Converts the given rule into the equivalent HCL (string) representation.
:param rule: the rule
:return: the equivalent HCL (string) representation of the rule
"""
if rule.pattern is not None:
return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
else:
return '%s = "%s"\n' % (rule.scope, rule.policy)
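# Illustrative output (hypothetical rules): Rule('key', 'read', 'foo')
# encodes to
#   key "foo" {
#    policy = "read"
#   }
# while the pattern-less Rule('operator', 'read') encodes to
#   operator = "read"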
def decode_rules_as_hcl_string(rules_as_hcl):
"""
Converts the given HCL (string) representation of rules into a list of rule domain models.
:param rules_as_hcl: the HCL (string) representation of a collection of rules
:return: the equivalent domain model to the given rules
"""
rules_as_hcl = to_text(rules_as_hcl)
rules_as_json = hcl.loads(rules_as_hcl)
return decode_rules_as_json(rules_as_json)
def decode_rules_as_json(rules_as_json):
"""
Converts the given JSON representation of rules into a list of rule domain models.
:param rules_as_json: the JSON representation of a collection of rules
:return: the equivalent domain model to the given rules
"""
rules = RuleCollection()
for scope in rules_as_json:
if not isinstance(rules_as_json[scope], dict):
rules.add(Rule(scope, rules_as_json[scope]))
else:
for pattern, policy in rules_as_json[scope].items():
rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
return rules
def encode_rules_as_json(rules):
"""
Converts the given rules into the equivalent JSON representation according to the documentation:
https://www.consul.io/docs/guides/acl.html#rule-specification.
:param rules: the rules
:return: JSON representation of the given rules
"""
rules_as_json = defaultdict(dict)
for rule in rules:
if rule.pattern is not None:
if rule.pattern in rules_as_json[rule.scope]:
raise AssertionError()
rules_as_json[rule.scope][rule.pattern] = {
_POLICY_JSON_PROPERTY: rule.policy
}
else:
if rule.scope in rules_as_json:
raise AssertionError()
rules_as_json[rule.scope] = rule.policy
return rules_as_json
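# Illustrative result (hypothetical rules): [Rule('key', 'read', 'foo'),
# Rule('keyring', 'write')] encodes to
#   {"key": {"foo": {"policy": "read"}}, "keyring": "write"}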
def decode_rules_as_yml(rules_as_yml):
"""
Converts the given YAML representation of rules into a list of rule domain models.
:param rules_as_yml: the YAML representation of a collection of rules
:return: the equivalent domain model to the given rules
"""
rules = RuleCollection()
if rules_as_yml:
for rule_as_yml in rules_as_yml:
rule_added = False
for scope in RULE_SCOPES:
if scope in rule_as_yml:
if rule_as_yml[scope] is None:
raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
else rule_as_yml[scope]
pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
rules.add(Rule(scope, policy, pattern))
rule_added = True
break
if not rule_added:
raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
return rules
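# Illustrative mapping (hypothetical task input): the YAML entry
#   - key: "foo"
#     policy: read
# becomes Rule('key', 'read', 'foo'), while the scope-only entry
#   - keyring: write
# becomes Rule('keyring', 'write') with no pattern.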
def decode_acl_as_json(acl_as_json):
"""
Converts the given JSON representation of an ACL into the equivalent domain model.
:param acl_as_json: the JSON representation of an ACL
:return: the equivalent domain model to the given ACL
"""
rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
else RuleCollection()
return ACL(
rules=rules,
token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
token=acl_as_json[_TOKEN_JSON_PROPERTY],
name=acl_as_json[_NAME_JSON_PROPERTY]
)
def decode_acls_as_json(acls_as_json):
"""
Converts the given JSON representation of ACLs into a list of ACL domain models.
:param acls_as_json: the JSON representation of a collection of ACLs
:return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
"""
return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
class ConsulACLNotFoundException(Exception):
"""
Exception raised if an ACL is not found.
"""
class Configuration:
"""
Configuration for this module.
"""
def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
rules=None, state=None, token=None, token_type=None):
self.management_token = management_token # type: str
self.host = host # type: str
self.scheme = scheme # type: str
self.validate_certs = validate_certs # type: bool
self.name = name # type: str
self.port = port # type: int
self.rules = rules # type: RuleCollection
self.state = state # type: str
self.token = token # type: str
self.token_type = token_type # type: str
class Output:
"""
Output of an action of this module.
"""
def __init__(self, changed=None, token=None, rules=None, operation=None):
self.changed = changed # type: bool
self.token = token # type: str
self.rules = rules # type: RuleCollection
self.operation = operation # type: str
class ACL:
"""
Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
"""
def __init__(self, rules, token_type, token, name):
self.rules = rules
self.token_type = token_type
self.token = token
self.name = name
def __eq__(self, other):
return other \
and isinstance(other, self.__class__) \
and self.rules == other.rules \
and self.token_type == other.token_type \
and self.token == other.token \
and self.name == other.name
def __hash__(self):
return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
class Rule:
"""
ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
"""
def __init__(self, scope, policy, pattern=None):
self.scope = scope
self.policy = policy
self.pattern = pattern
def __eq__(self, other):
return isinstance(other, self.__class__) \
and self.scope == other.scope \
and self.policy == other.policy \
and self.pattern == other.pattern
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
def __str__(self):
return encode_rule_as_hcl_string(self)
class RuleCollection:
"""
Collection of ACL rules, which are part of a Consul ACL.
"""
def __init__(self):
self._rules = {}
for scope in RULE_SCOPES:
self._rules[scope] = {}
def __iter__(self):
all_rules = []
for scope, pattern_keyed_rules in self._rules.items():
for pattern, rule in pattern_keyed_rules.items():
all_rules.append(rule)
return iter(all_rules)
def __len__(self):
count = 0
for scope in RULE_SCOPES:
count += len(self._rules[scope])
return count
def __eq__(self, other):
return isinstance(other, self.__class__) \
and set(self) == set(other)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return encode_rules_as_hcl_string(self)
def add(self, rule):
"""
Adds the given rule to this collection.
:param rule: model of a rule
:raises ValueError: raised if there already exists a rule for a given scope and pattern
"""
if rule.pattern in self._rules[rule.scope]:
pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
self._rules[rule.scope][rule.pattern] = rule
def get_consul_client(configuration):
"""
Gets a Consul client for the given configuration.
Does not check if the Consul client can connect.
:param configuration: the run configuration
:return: Consul client
"""
token = configuration.management_token
if token is None:
token = configuration.token
if token is None:
raise AssertionError("Expecting the management token to always be set")
return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
verify=configuration.validate_certs, token=token)
def check_dependencies():
"""
Checks that the required dependencies have been imported.
:exception ImportError: if it is detected that any of the required dependencies have not been imported
"""
if not python_consul_installed:
raise ImportError("python-consul required for this module. "
"See: https://python-consul.readthedocs.io/en/latest/#installation")
if not pyhcl_installed:
raise ImportError("pyhcl required for this module. "
"See: https://pypi.org/project/pyhcl/")
if not has_requests:
raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
def main():
"""
Main method.
"""
module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
try:
check_dependencies()
except ImportError as e:
module.fail_json(msg=str(e))
configuration = Configuration(
management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
host=module.params.get(HOST_PARAMETER_NAME),
scheme=module.params.get(SCHEME_PARAMETER_NAME),
validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
name=module.params.get(NAME_PARAMETER_NAME),
port=module.params.get(PORT_PARAMETER_NAME),
rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
state=module.params.get(STATE_PARAMETER_NAME),
token=module.params.get(TOKEN_PARAMETER_NAME),
token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
)
consul_client = get_consul_client(configuration)
try:
if configuration.state == PRESENT_STATE_VALUE:
output = set_acl(consul_client, configuration)
else:
output = remove_acl(consul_client, configuration)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
configuration.host, configuration.port, str(e)))
raise
return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
if output.rules is not None:
return_values["rules"] = encode_rules_as_json(output.rules)
module.exit_json(**return_values)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,108 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024, Florian Apolloner (@apollo13)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: consul_acl_bootstrap
short_description: Bootstrap ACLs in Consul
version_added: 8.3.0
description:
- Allows bootstrapping of ACLs in a Consul cluster, see
U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls) for details.
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
- community.general.consul
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
state:
description:
- Whether the token should be present or absent.
choices: ['present', 'bootstrapped']
default: present
type: str
bootstrap_secret:
description:
- The secret to be used as secret ID for the initial token.
- Needs to be an UUID.
type: str
"""
EXAMPLES = """
- name: Bootstrap the ACL system
community.general.consul_acl_bootstrap:
bootstrap_secret: 22eaeed1-bdbd-4651-724e-42ae6c43e387
"""
RETURN = """
result:
description:
- The bootstrap result as returned by the consul HTTP API.
- "B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and
C(ID) will not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
If you pass O(bootstrap_secret), make sure your playbook/role does not depend
on this return value!"
returned: changed
type: dict
sample:
AccessorID: 834a5881-10a9-a45b-f63c-490e28743557
CreateIndex: 25
CreateTime: '2024-01-21T20:26:27.114612038+01:00'
Description: Bootstrap Token (Global Management)
Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY=
ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
Local: false
ModifyIndex: 25
Policies:
- ID: 00000000-0000-0000-0000-000000000001
Name: global-management
SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
RequestError,
_ConsulModule,
)
_ARGUMENT_SPEC = {
"state": dict(type="str", choices=["present", "bootstrapped"], default="present"),
"bootstrap_secret": dict(type="str", no_log=True),
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
_ARGUMENT_SPEC.pop("token")
def main():
module = AnsibleModule(_ARGUMENT_SPEC)
consul_module = _ConsulModule(module)
data = {}
if "bootstrap_secret" in module.params:
data["BootstrapSecret"] = module.params["bootstrap_secret"]
try:
response = consul_module.put("acl/bootstrap", data=data)
except RequestError as e:
if e.status == 403 and b"ACL bootstrap no longer allowed" in e.response_data:
return module.exit_json(changed=False)
raise
else:
return module.exit_json(changed=True, result=response)
if __name__ == "__main__":
main()
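# Illustrative equivalent HTTP call (hypothetical secret): the module PUTs to
# the agent's ACL bootstrap endpoint (/v1/acl/bootstrap), optionally with a
# body such as {"BootstrapSecret": "22eaeed1-bdbd-4651-724e-42ae6c43e387"}.
# A 403 response containing "ACL bootstrap no longer allowed" is treated as
# "already bootstrapped" and reported as unchanged.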

View File

@ -0,0 +1,254 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Michael Ilg
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: consul_agent_check
short_description: Add, modify, and delete checks within a consul cluster
version_added: 9.1.0
description:
- Allows the addition, modification and deletion of checks in a consul
cluster via the agent. For more details on using and configuring Checks,
see U(https://developer.hashicorp.com/consul/api-docs/agent/check).
- Currently, there is no complete way to retrieve the script, interval or TTL
metadata for a registered check. Without this metadata it is not possible to
tell if the data supplied with Ansible represents a change to a check. As a
result this module does not attempt to determine changes and always reports
that a change occurred. An API method is planned to supply this metadata, at
which point change management will be added.
author:
- Michael Ilg (@Ilgmi)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.actiongroup_consul
- community.general.consul.token
- community.general.attributes
attributes:
check_mode:
support: full
details:
- The result is the object as it is defined in the module options and not the object structure of the consul API.
For a better overview of what the object structure looks like,
take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks).
diff_mode:
support: partial
details:
- In check mode the diff will show the object as it is defined in the module options and not the object structure of the consul API.
options:
state:
description:
- Whether the check should be present or absent.
choices: ['present', 'absent']
default: present
type: str
name:
description:
- Required name for the service check.
type: str
id:
description:
- Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary to provide
an ID for uniqueness. This value is returned in the response as C(CheckID).
type: str
interval:
description:
- The interval at which the service check will be run.
This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m).
If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
- Required if one of the parameters O(args), O(http), or O(tcp) is specified.
type: str
notes:
description:
- Notes to attach to check when registering it.
type: str
args:
description:
- Specifies command arguments to run to update the status of the check.
- Requires O(interval) to be provided.
- Mutually exclusive with O(ttl), O(tcp) and O(http).
type: list
elements: str
ttl:
description:
- Checks can be registered with a TTL instead of O(args) and O(interval);
this means that the service will check in with the agent before the
TTL expires. If it does not, the check will be considered failed.
Required if registering a check when O(args) and O(interval) are missing.
Similar to the interval, this is a number with a V(s) or V(m) suffix to
signify the units of seconds or minutes, for example V(15s) or V(1m).
If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
- Mutually exclusive with O(args), O(tcp) and O(http).
type: str
tcp:
description:
- Checks can be registered with a TCP port. This means that consul
will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
The format is V(host:port), for example V(localhost:80).
- Requires O(interval) to be provided.
- Mutually exclusive with O(args), O(ttl) and O(http).
type: str
version_added: '1.3.0'
http:
description:
- Checks can be registered with an HTTP endpoint. This means that consul
will check that the http endpoint returns a successful HTTP status.
- Requires O(interval) to be provided.
- Mutually exclusive with O(args), O(ttl) and O(tcp).
type: str
timeout:
description:
- A custom HTTP check timeout. The consul default is 10 seconds.
Similar to the interval this is a number with a V(s) or V(m) suffix to
signify the units of seconds or minutes, for example V(15s) or V(1m).
If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
type: str
service_id:
description:
- The ID for the service, must be unique per node. If O(state=absent),
defaults to the service name if supplied.
type: str
'''
EXAMPLES = '''
- name: Register tcp check for service 'nginx'
community.general.consul_agent_check:
name: nginx_tcp_check
service_id: nginx
interval: 60s
tcp: localhost:80
notes: "Nginx Check"
- name: Register http check for service 'nginx'
community.general.consul_agent_check:
name: nginx_http_check
service_id: nginx
interval: 60s
http: http://localhost:80/status
notes: "Nginx Check"
- name: Remove check for service 'nginx'
community.general.consul_agent_check:
state: absent
id: nginx_http_check
service_id: "{{ nginx_service.ID }}"
'''
RETURN = """
check:
description: The check as returned by the consul HTTP API.
returned: always
type: dict
sample:
CheckID: nginx_check
ServiceID: nginx
Interval: 30s
Type: http
Notes: Nginx Check
operation:
description: The operation performed.
returned: changed
type: str
sample: update
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
OPERATION_CREATE,
OPERATION_UPDATE,
OPERATION_DELETE,
OPERATION_READ,
_ConsulModule,
validate_check,
)
_ARGUMENT_SPEC = {
"state": dict(default="present", choices=["present", "absent"]),
"name": dict(type='str'),
"id": dict(type='str'),
"interval": dict(type='str'),
"notes": dict(type='str'),
"args": dict(type='list', elements='str'),
"http": dict(type='str'),
"tcp": dict(type='str'),
"ttl": dict(type='str'),
"timeout": dict(type='str'),
"service_id": dict(type='str'),
}
_MUTUALLY_EXCLUSIVE = [
('args', 'ttl', 'tcp', 'http'),
]
_REQUIRED_IF = [
('state', 'present', ['name']),
('state', 'absent', ('id', 'name'), True),
]
_REQUIRED_BY = {
'args': 'interval',
'http': 'interval',
'tcp': 'interval',
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
class ConsulAgentCheckModule(_ConsulModule):
api_endpoint = "agent/check"
result_key = "check"
unique_identifiers = ["id", "name"]
operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags",
"Status", "Type", "ExposedPort", "Definition"}
def endpoint_url(self, operation, identifier=None):
if operation == OPERATION_READ:
return "agent/checks"
if operation in [OPERATION_CREATE, OPERATION_UPDATE]:
return "/".join([self.api_endpoint, "register"])
if operation == OPERATION_DELETE:
return "/".join([self.api_endpoint, "deregister", identifier])
return super(ConsulAgentCheckModule, self).endpoint_url(operation, identifier)
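# Illustrative URLs (hypothetical identifier): reads list all checks via
# 'agent/checks', create and update go to 'agent/check/register', and
# delete uses 'agent/check/deregister/<check_id>'.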
def read_object(self):
url = self.endpoint_url(OPERATION_READ)
checks = self.get(url)
identifier = self.id_from_obj(self.params)
if identifier in checks:
return checks[identifier]
return None
def prepare_object(self, existing, obj):
existing = super(ConsulAgentCheckModule, self).prepare_object(existing, obj)
validate_check(existing)
return existing
def delete_object(self, obj):
if not self._module.check_mode:
self.put(self.endpoint_url(OPERATION_DELETE, obj.get("CheckID")))
return {}
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
mutually_exclusive=_MUTUALLY_EXCLUSIVE,
required_if=_REQUIRED_IF,
required_by=_REQUIRED_BY,
supports_check_mode=True,
)
consul_module = ConsulAgentCheckModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,289 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Michael Ilg
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: consul_agent_service
short_description: Add, modify and delete services within a consul cluster
version_added: 9.1.0
description:
- Allows the addition, modification and deletion of services in a consul
cluster via the agent.
- There are currently no plans to create services and checks in one.
This is because the Consul API does not provide checks for a service and
the checks themselves do not match the module parameters.
Therefore, only a service without checks can be created in this module.
author:
- Michael Ilg (@Ilgmi)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.actiongroup_consul
- community.general.consul.token
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: partial
details:
- In check mode the diff will miss operational attributes.
options:
state:
description:
- Whether the service should be present or absent.
choices: ['present', 'absent']
default: present
type: str
name:
description:
- Unique name for the service on a node; must be unique per node.
Required if registering a service.
type: str
id:
description:
- Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not provided.
If O(state=absent), defaults to the service name if supplied.
type: str
tags:
description:
- Tags that will be attached to the service registration.
type: list
elements: str
address:
description:
- The address to advertise that the service will be listening on.
This value will be passed as the C(address) parameter to Consul's
C(/v1/agent/service/register) API method, so refer to the Consul API
documentation for further details.
type: str
meta:
description:
- Optional metadata used for filtering.
For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed;
disallowed characters are replaced with underscores.
type: dict
service_port:
description:
- The port on which the service is listening. Can optionally be supplied for
registration of a service, that is if O(name) or O(id) is set.
type: int
enable_tag_override:
description:
- Specifies whether to disable the anti-entropy feature for this service's tags.
If C(EnableTagOverride) is set to V(true), then external agents can update this service in the catalog and modify the tags.
type: bool
default: False
weights:
description:
- Specifies weights for the service.
type: dict
suboptions:
passing:
description:
- Weights for passing.
type: int
default: 1
warning:
description:
- Weights for warning.
type: int
default: 1
default: {"passing": 1, "warning": 1}
'''
EXAMPLES = '''
- name: Register nginx service with the local consul agent
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: nginx
service_port: 80
- name: Register nginx with a tcp check
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: nginx
service_port: 80
- name: Register nginx with an http check
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: nginx
service_port: 80
- name: Register external service nginx available at 10.1.5.23
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: nginx
service_port: 80
address: 10.1.5.23
- name: Register nginx with some service tags
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: nginx
service_port: 80
tags:
- prod
- webservers
- name: Register nginx with some service meta
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: nginx
service_port: 80
meta:
nginx_version: 1.25.3
- name: Remove nginx service
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
service_id: nginx
state: absent
- name: Register celery worker service
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
name: celery-worker
tags:
- prod
- worker
'''
RETURN = """
service:
description: The service as returned by the consul HTTP API.
returned: always
type: dict
sample:
ID: nginx
Service: nginx
Address: localhost
Port: 80
Tags:
- http
Meta:
- nginx_version: 1.23.3
Datacenter: dc1
Weights:
Passing: 1
Warning: 1
ContentHash: 61a245cd985261ac
EnableTagOverride: false
operation:
description: The operation performed.
returned: changed
type: str
sample: update
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
OPERATION_CREATE,
OPERATION_UPDATE,
OPERATION_DELETE,
_ConsulModule
)
_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')]
_CHECK_REQUIRED_BY = {
'args': 'interval',
'http': 'interval',
'tcp': 'interval',
}
_ARGUMENT_SPEC = {
"state": dict(default="present", choices=["present", "absent"]),
"name": dict(type='str'),
"id": dict(type='str'),
"tags": dict(type='list', elements='str'),
"address": dict(type='str'),
"meta": dict(type='dict'),
"service_port": dict(type='int'),
"enable_tag_override": dict(type='bool', default=False),
"weights": dict(type='dict', options=dict(
passing=dict(type='int', default=1, no_log=False),
warning=dict(type='int', default=1)
), default={"passing": 1, "warning": 1})
}
_REQUIRED_IF = [
('state', 'present', ['name']),
('state', 'absent', ('id', 'name'), True),
]
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
class ConsulAgentServiceModule(_ConsulModule):
api_endpoint = "agent/service"
result_key = "service"
unique_identifiers = ["id", "name"]
operational_attributes = {"Service", "ContentHash", "Datacenter"}
def endpoint_url(self, operation, identifier=None):
if operation in [OPERATION_CREATE, OPERATION_UPDATE]:
return "/".join([self.api_endpoint, "register"])
if operation == OPERATION_DELETE:
return "/".join([self.api_endpoint, "deregister", identifier])
return super(ConsulAgentServiceModule, self).endpoint_url(operation, identifier)
def prepare_object(self, existing, obj):
existing = super(ConsulAgentServiceModule, self).prepare_object(existing, obj)
if "ServicePort" in existing:
existing["Port"] = existing.pop("ServicePort")
if "ID" not in existing:
existing["ID"] = existing["Name"]
return existing
def needs_update(self, api_obj, module_obj):
obj = {}
if "Service" in api_obj:
obj["Service"] = api_obj["Service"]
api_obj = self.prepare_object(api_obj, obj)
if "Name" in module_obj:
module_obj["Service"] = module_obj.pop("Name")
if "ServicePort" in module_obj:
module_obj["Port"] = module_obj.pop("ServicePort")
return super(ConsulAgentServiceModule, self).needs_update(api_obj, module_obj)
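# Illustrative comparison (hypothetical values): module options name='nginx'
# and service_port=80 are renamed to Service='nginx' and Port=80 above, so
# they line up with the agent API object and an unchanged service is
# correctly reported as such.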
def delete_object(self, obj):
if not self._module.check_mode:
url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True))
self.put(url)
return {}
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
required_if=_REQUIRED_IF,
supports_check_mode=True,
)
consul_module = ConsulAgentServiceModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,207 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024, Florian Apolloner (@apollo13)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: consul_auth_method
short_description: Manipulate Consul auth methods
version_added: 8.3.0
description:
- Allows the addition, modification and deletion of auth methods in a consul
cluster via the agent. For more details on using and configuring ACLs,
see U(https://www.consul.io/docs/guides/acl.html).
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.actiongroup_consul
- community.general.consul.token
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: partial
details:
- In check mode the diff will miss operational attributes.
options:
state:
description:
- Whether the token should be present or absent.
choices: ['present', 'absent']
default: present
type: str
name:
description:
- Specifies a name for the ACL auth method.
- The name can contain alphanumeric characters, dashes C(-), and underscores C(_).
type: str
required: true
type:
description:
- The type of auth method being configured.
- This field is immutable.
- Required when the auth method is created.
type: str
choices: ['kubernetes', 'jwt', 'oidc', 'aws-iam']
description:
description:
- Free form human readable description of the auth method.
type: str
display_name:
description:
- An optional name to use instead of O(name) when displaying information about this auth method.
type: str
max_token_ttl:
description:
- This specifies the maximum life of any token created by this auth method.
- Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes, respectively).
type: str
token_locality:
description:
- Defines the kind of token that this auth method should produce.
type: str
choices: ['local', 'global']
config:
description:
- The raw configuration to use for the chosen auth method.
- Contents will vary depending upon the type chosen.
- Required when the auth method is created.
type: dict
"""
EXAMPLES = """
- name: Create an auth method
community.general.consul_auth_method:
name: test
type: jwt
config:
jwt_validation_pubkeys:
- |
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo
4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u
+qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh
kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ
0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg
cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc
mwIDAQAB
-----END PUBLIC KEY-----
token: "{{ consul_management_token }}"
- name: Delete auth method
community.general.consul_auth_method:
name: test
state: absent
token: "{{ consul_management_token }}"
"""
RETURN = """
auth_method:
description: The auth method as returned by the consul HTTP API.
returned: always
type: dict
sample:
Config:
JWTValidationPubkeys:
- |-
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo
4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u
+qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh
kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ
0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg
cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc
mwIDAQAB
-----END PUBLIC KEY-----
CreateIndex: 416
ModifyIndex: 487
Name: test
Type: jwt
operation:
description: The operation performed.
returned: changed
type: str
sample: update
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
_ConsulModule,
camel_case_key,
)
def normalize_ttl(ttl):
matches = re.findall(r"(\d+)(h|m|s)", ttl)
ttl = 0
for value, unit in matches:
value = int(value)
if unit == "m":
value *= 60
elif unit == "h":
value *= 60 * 60
ttl += value
new_ttl = ""
hours, remainder = divmod(ttl, 3600)
if hours:
new_ttl += "{0}h".format(hours)
minutes, seconds = divmod(remainder, 60)
if minutes:
new_ttl += "{0}m".format(minutes)
if seconds:
new_ttl += "{0}s".format(seconds)
return new_ttl
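# Illustrative normalisation (not executed): normalize_ttl('90m') -> '1h30m'
# and normalize_ttl('3600s') -> '1h', so equivalent TTL spellings compare as
# equal when deciding whether the auth method needs an update.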
class ConsulAuthMethodModule(_ConsulModule):
api_endpoint = "acl/auth-method"
result_key = "auth_method"
unique_identifiers = ["name"]
def map_param(self, k, v, is_update):
if k == "config" and v:
v = {camel_case_key(k2): v2 for k2, v2 in v.items()}
return super(ConsulAuthMethodModule, self).map_param(k, v, is_update)
def needs_update(self, api_obj, module_obj):
if "MaxTokenTTL" in module_obj:
module_obj["MaxTokenTTL"] = normalize_ttl(module_obj["MaxTokenTTL"])
return super(ConsulAuthMethodModule, self).needs_update(api_obj, module_obj)
_ARGUMENT_SPEC = {
"name": dict(type="str", required=True),
"type": dict(type="str", choices=["kubernetes", "jwt", "oidc", "aws-iam"]),
"description": dict(type="str"),
"display_name": dict(type="str"),
"max_token_ttl": dict(type="str", no_log=False),
"token_locality": dict(type="str", choices=["local", "global"]),
"config": dict(type="dict"),
"state": dict(default="present", choices=["present", "absent"]),
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
supports_check_mode=True,
)
consul_module = ConsulAuthMethodModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,183 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024, Florian Apolloner (@apollo13)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: consul_binding_rule
short_description: Manipulate Consul binding rules
version_added: 8.3.0
description:
- Allows the addition, modification and deletion of binding rules in a consul
cluster via the agent. For more details on using and configuring binding rules,
see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules).
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.actiongroup_consul
- community.general.consul.token
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: partial
details:
- In check mode the diff will miss operational attributes.
options:
state:
description:
- Whether the binding rule should be present or absent.
choices: ['present', 'absent']
default: present
type: str
name:
description:
- Specifies a name for the binding rule.
- 'Note: This is used to identify the binding rule. But since the API does not support a name, it is prefixed to the description.'
type: str
required: true
description:
description:
- Free form human readable description of the binding rule.
type: str
auth_method:
description:
- The name of the auth method that this rule applies to.
type: str
required: true
selector:
description:
- Specifies the expression used to match this rule against valid identities returned from an auth method validation.
- If empty this binding rule matches all valid identities returned from the auth method.
type: str
bind_type:
description:
- Specifies the way the binding rule affects a token created at login.
type: str
choices: [service, node, role, templated-policy]
bind_name:
description:
- The name to bind to a token at login-time.
- What it binds to can be adjusted with different values of the O(bind_type) parameter.
type: str
bind_vars:
description:
- Specifies the templated policy variables when O(bind_type) is set to V(templated-policy).
type: dict
"""
EXAMPLES = """
- name: Create a binding rule
community.general.consul_binding_rule:
name: my_name
description: example rule
auth_method: minikube
bind_type: service
bind_name: "{{ serviceaccount.name }}"
token: "{{ consul_management_token }}"
- name: Remove a binding rule
community.general.consul_binding_rule:
name: my_name
auth_method: minikube
state: absent
"""
RETURN = """
binding_rule:
description: The binding rule as returned by the consul HTTP API.
returned: always
type: dict
sample:
Description: "my_name: example rule"
AuthMethod: minikube
Selector: serviceaccount.namespace==default
BindType: service
BindName: "{{ serviceaccount.name }}"
CreateIndex: 30
ID: 59c8a237-e481-4239-9202-45f117950c5f
ModifyIndex: 33
operation:
description: The operation performed.
returned: changed
type: str
sample: update
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
RequestError,
_ConsulModule,
)
class ConsulBindingRuleModule(_ConsulModule):
api_endpoint = "acl/binding-rule"
result_key = "binding_rule"
unique_identifiers = ["id"]
def read_object(self):
url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"])
try:
results = self.get(url)
for result in results:
if result.get("Description").startswith(
"{0}: ".format(self.params["name"])
):
return result
except RequestError as e:
if e.status == 404:
return
elif e.status == 403 and b"ACL not found" in e.response_data:
return
raise
def module_to_obj(self, is_update):
obj = super(ConsulBindingRuleModule, self).module_to_obj(is_update)
del obj["Name"]
return obj
def prepare_object(self, existing, obj):
final = super(ConsulBindingRuleModule, self).prepare_object(existing, obj)
name = self.params["name"]
description = final.pop("Description", "").split(": ", 1)[-1]
final["Description"] = "{0}: {1}".format(name, description)
return final
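# Illustrative naming scheme (hypothetical values): with name='my_name' and
# description='example rule', the rule is stored with
# Description='my_name: example rule'; read_object() later matches on that
# 'my_name: ' prefix because the binding-rule API has no name field.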
_ARGUMENT_SPEC = {
"name": dict(type="str", required=True),
"description": dict(type="str"),
"auth_method": dict(type="str", required=True),
"selector": dict(type="str"),
"bind_type": dict(
type="str", choices=["service", "node", "role", "templated-policy"]
),
"bind_name": dict(type="str"),
"bind_vars": dict(type="dict"),
"state": dict(default="present", choices=["present", "absent"]),
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
supports_check_mode=True,
)
consul_module = ConsulBindingRuleModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,336 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# Copyright (c) 2018 Genome Research Ltd.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: consul_kv
short_description: Manipulate entries in the key/value store of a consul cluster
description:
- Allows the retrieval, addition, modification and deletion of key/value entries in a
consul cluster via the agent. The entire contents of the record, including
the indices, flags and session are returned as C(value).
- If the O(key) represents a prefix, note that when a value is removed, the existing
value, if any, is returned as part of the results.
- See http://www.consul.io/docs/agent/http.html#kv for more details.
requirements:
- python-consul
- requests
author:
- Steve Gargan (@sgargan)
- Colin Nolan (@colin-nolan)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key
contents will be set to the value supplied and C(changed) will be set to V(true) only if the value was
different to the current contents. If the state is V(present) and O(value) is not set, the existing value
associated to the key will be returned. The state V(absent) will remove the key/value pair,
again C(changed) will be set to V(true) only if the key actually existed
prior to the removal. An attempt can be made to obtain or free the
lock associated with a key/value pair with the states V(acquire) or
V(release) respectively. A valid session must be supplied to make the
attempt; C(changed) will be V(true) if the attempt is successful, V(false)
otherwise.
type: str
choices: [ absent, acquire, present, release ]
default: present
key:
description:
- The key at which the value should be stored.
type: str
required: true
value:
description:
- The value to be associated with the given key; required if O(state)
is V(present).
type: str
recurse:
description:
- If the key represents a prefix, each entry with the prefix can be
retrieved by setting this to V(true).
type: bool
retrieve:
description:
- If the O(state) is V(present) and O(value) is set, perform a
read after setting the value and return this value.
default: true
type: bool
session:
description:
- The session that should be used to acquire or release a lock
associated with a key/value pair.
type: str
token:
description:
- The token key identifying an ACL rule set that controls access to
the key/value pair.
type: str
cas:
description:
- Used when acquiring a lock with a session. If the O(cas) is V(0), then
Consul will only put the key if it does not already exist. If the
O(cas) value is non-zero, then the key is only set if the index matches
the ModifyIndex of that key.
type: str
flags:
description:
- Opaque positive integer value that can be passed when setting a value.
type: str
host:
description:
- Host of the consul agent.
type: str
default: localhost
port:
description:
- The port on which the consul agent is running.
type: int
default: 8500
scheme:
description:
- The protocol scheme on which the consul agent is running.
type: str
default: http
validate_certs:
description:
- Whether to verify the tls certificate of the consul agent.
type: bool
default: true
'''
EXAMPLES = '''
# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
- name: Retrieve a value from the key/value store
community.general.consul_kv:
key: somekey
register: retrieved_key
- name: Add or update the value associated with a key in the key/value store
community.general.consul_kv:
key: somekey
value: somevalue
- name: Remove a key from the store
community.general.consul_kv:
key: somekey
state: absent
- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
community.general.consul_kv:
key: ansible/groups/dc1/somenode
value: top_secret
- name: Register a key/value pair with an associated session
community.general.consul_kv:
key: stg/node/server_birthday
value: 20160509
session: "{{ sessionid }}"
state: acquire
'''
from ansible.module_utils.common.text.converters import to_text
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
from ansible.module_utils.basic import AnsibleModule
# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequent API call,
# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
NOT_SET = None
def _has_value_changed(consul_client, key, target_value):
"""
Uses the given Consul client to determine if the value associated to the given key is different to the given target
value.
:param consul_client: Consul connected client
:param key: key in Consul
:param target_value: value to be associated to the key
:return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
value has changed (i.e. the stored value is not the target value)
"""
index, existing = consul_client.kv.get(key)
if not existing:
return index, True
try:
changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
return index, changed
except UnicodeError:
# Existing value was not decodable but all values we set are valid utf-8
return index, True
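# Illustrative behaviour (hypothetical data): if key 'somekey' currently
# holds b'somevalue', _has_value_changed(client, 'somekey', 'somevalue')
# returns (index, False) and the module reports no change; a missing key or
# a different (or undecodable) stored value yields (index, True).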
def execute(module):
state = module.params.get('state')
if state == 'acquire' or state == 'release':
lock(module, state)
elif state == 'present':
if module.params.get('value') is NOT_SET:
get_value(module)
else:
set_value(module)
elif state == 'absent':
remove_value(module)
else:
module.exit_json(msg="Unsupported state: %s" % (state, ))
def lock(module, state):
consul_api = get_consul_api(module)
session = module.params.get('session')
key = module.params.get('key')
value = module.params.get('value')
if not session:
module.fail_json(
msg='%s of lock for %s requested but no session supplied' %
(state, key))
index, changed = _has_value_changed(consul_api, key, value)
if changed and not module.check_mode:
if state == 'acquire':
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
acquire=session,
flags=module.params.get('flags'))
else:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
release=session,
flags=module.params.get('flags'))
module.exit_json(changed=changed,
index=index,
key=key)
def get_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
module.exit_json(changed=False, index=index, data=existing_value)
def set_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
value = module.params.get('value')
if value is NOT_SET:
raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
index, changed = _has_value_changed(consul_api, key, value)
if changed and not module.check_mode:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
flags=module.params.get('flags'))
stored = None
if module.params.get('retrieve'):
index, stored = consul_api.kv.get(key)
module.exit_json(changed=changed,
index=index,
key=key,
data=stored)
def remove_value(module):
''' Remove the value associated with the given key. If the recurse parameter
is set, then any key prefixed with the given key will be removed. '''
consul_api = get_consul_api(module)
key = module.params.get('key')
index, existing = consul_api.kv.get(
key, recurse=module.params.get('recurse'))
changed = existing is not None
if changed and not module.check_mode:
consul_api.kv.delete(key, module.params.get('recurse'))
module.exit_json(changed=changed,
index=index,
key=key,
data=existing)
def get_consul_api(module):
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'),
scheme=module.params.get('scheme'),
verify=module.params.get('validate_certs'),
token=module.params.get('token'))
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "
"see https://python-consul.readthedocs.io/en/latest/#installation")
def main():
module = AnsibleModule(
argument_spec=dict(
cas=dict(type='str'),
flags=dict(type='str'),
key=dict(type='str', required=True, no_log=False),
host=dict(type='str', default='localhost'),
scheme=dict(type='str', default='http'),
validate_certs=dict(type='bool', default=True),
port=dict(type='int', default=8500),
recurse=dict(type='bool'),
retrieve=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
token=dict(type='str', no_log=True),
value=dict(type='str', default=NOT_SET),
session=dict(type='str'),
),
supports_check_mode=True
)
test_dependencies(module)
try:
execute(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), e))
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()

View File

@ -0,0 +1,166 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Håkon Lerring
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: consul_policy
short_description: Manipulate Consul policies
version_added: 7.2.0
description:
- Allows the addition, modification and deletion of policies in a consul
cluster via the agent. For more details on using and configuring ACLs,
see U(https://www.consul.io/docs/guides/acl.html).
author:
- Håkon Lerring (@Hakon)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.actiongroup_consul
- community.general.consul.token
- community.general.attributes
attributes:
check_mode:
support: full
version_added: 8.3.0
diff_mode:
support: partial
version_added: 8.3.0
details:
- In check mode the diff will miss operational attributes.
action_group:
version_added: 8.3.0
options:
state:
description:
- Whether the policy should be present or absent.
choices: ['present', 'absent']
default: present
type: str
valid_datacenters:
description:
- Valid datacenters for the policy. All if list is empty.
type: list
elements: str
name:
description:
- The name that should be associated with the policy; this is opaque
to Consul.
required: true
type: str
description:
description:
- Description of the policy.
type: str
rules:
type: str
description:
- Rule document that should be associated with the current policy.
"""
EXAMPLES = """
- name: Create a policy with rules
community.general.consul_policy:
host: consul1.example.com
token: some_management_acl
name: foo-access
rules: |
key "foo" {
policy = "read"
}
key "private/foo" {
policy = "deny"
}
- name: Update the rules associated to a policy
community.general.consul_policy:
host: consul1.example.com
token: some_management_acl
name: foo-access
rules: |
key "foo" {
policy = "read"
}
key "private/foo" {
policy = "deny"
}
event "bbq" {
policy = "write"
}
- name: Remove a policy
community.general.consul_policy:
host: consul1.example.com
token: some_management_acl
name: foo-access
state: absent
"""
RETURN = """
policy:
description: The policy as returned by the consul HTTP API.
returned: always
type: dict
sample:
CreateIndex: 632
Description: Testing
Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
Name: foo-access
Rules: |-
key "foo" {
policy = "read"
}
key "private/foo" {
policy = "deny"
}
operation:
description: The operation performed.
returned: changed
type: str
sample: update
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
OPERATION_READ,
_ConsulModule,
)
_ARGUMENT_SPEC = {
"name": dict(required=True),
"description": dict(required=False, type="str"),
"rules": dict(type="str"),
"valid_datacenters": dict(type="list", elements="str"),
"state": dict(default="present", choices=["present", "absent"]),
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
class ConsulPolicyModule(_ConsulModule):
api_endpoint = "acl/policy"
result_key = "policy"
unique_identifiers = ["id"]
def endpoint_url(self, operation, identifier=None):
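# Consul's ACL API can look policies up by name via acl/policy/name/<name>,
# so reads use that endpoint while create/update/delete use the default one.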
if operation == OPERATION_READ:
return [self.api_endpoint, "name", self.params["name"]]
return super(ConsulPolicyModule, self).endpoint_url(operation, identifier)
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
supports_check_mode=True,
)
consul_module = ConsulPolicyModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,283 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Håkon Lerring
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: consul_role
short_description: Manipulate Consul roles
version_added: 7.5.0
description:
- Allows the addition, modification and deletion of roles in a consul
cluster via the agent. For more details on using and configuring ACLs,
see U(https://www.consul.io/docs/guides/acl.html).
author:
- Håkon Lerring (@Hakon)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.token
- community.general.consul.actiongroup_consul
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: partial
details:
- In check mode the diff will miss operational attributes.
version_added: 8.3.0
action_group:
version_added: 8.3.0
options:
name:
description:
- A name used to identify the role.
required: true
type: str
state:
description:
- Whether the role should be present or absent.
choices: ['present', 'absent']
default: present
type: str
description:
description:
- Description of the role.
- If not specified, the assigned description will not be changed.
type: str
policies:
type: list
elements: dict
description:
- List of policies to attach to the role. Each policy is a dict.
- If the parameter is left blank, any policies currently assigned will not be changed.
- An empty array (V([])) will clear any policies previously set.
suboptions:
name:
description:
- The name of the policy to attach to this role; see M(community.general.consul_policy) for more info.
- Either this or O(policies[].id) must be specified.
type: str
id:
description:
- The ID of the policy to attach to this role; see M(community.general.consul_policy) for more info.
- Either this or O(policies[].name) must be specified.
type: str
templated_policies:
description:
- The list of templated policies that should be applied to the role.
type: list
elements: dict
version_added: 8.3.0
suboptions:
template_name:
description:
- The templated policy name.
type: str
required: true
template_variables:
description:
- The templated policy variables.
- Not all templated policies require variables.
type: dict
service_identities:
type: list
elements: dict
description:
- List of service identities to attach to the role.
- If not specified, any service identities currently assigned will not be changed.
- If the parameter is an empty array (V([])), any service identities assigned will be unassigned.
suboptions:
service_name:
description:
- The name of the service.
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
- May only contain lowercase alphanumeric characters as well as V(-) and V(_).
- This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name)
in community.general 8.3.0. The old name can still be used.
type: str
required: true
aliases:
- name
datacenters:
description:
- The datacenters in which the policies will be effective.
- This will result in the effective policy only being valid in these datacenters.
- If an empty array (V([])) is specified, the policies will be valid in all
datacenters, including those which do not yet exist but may in the future.
type: list
elements: str
node_identities:
type: list
elements: dict
description:
- List of node identities to attach to the role.
- If not specified, any node identities currently assigned will not be changed.
- If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
suboptions:
node_name:
description:
- The name of the node.
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
- May only contain lowercase alphanumeric characters as well as V(-) and V(_).
- This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name)
in community.general 8.3.0. The old name can still be used.
type: str
required: true
aliases:
- name
datacenter:
description:
- The node's datacenter.
- This will result in effective policy only being valid in this datacenter.
type: str
required: true
"""
EXAMPLES = """
- name: Create a role with 2 policies
community.general.consul_role:
host: consul1.example.com
token: some_management_acl
name: foo-role
policies:
- id: 783beef3-783f-f41f-7422-7087dc272765
- name: "policy-1"
- name: Create a role with service identity
community.general.consul_role:
host: consul1.example.com
token: some_management_acl
name: foo-role-2
service_identities:
- name: web
datacenters:
- dc1
- name: Create a role with node identity
community.general.consul_role:
host: consul1.example.com
token: some_management_acl
name: foo-role-3
node_identities:
- name: node-1
datacenter: dc2
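# Illustrative sketch: templated_policies is a documented option; the template
# name below is an assumption and depends on the Consul version in use.
- name: Create a role with a templated policy
  community.general.consul_role:
    host: consul1.example.com
    token: some_management_acl
    name: foo-role-4
    templated_policies:
      - template_name: builtin/service
        template_variables:
          name: web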
- name: Remove a role
community.general.consul_role:
host: consul1.example.com
token: some_management_acl
name: foo-role-3
state: absent
"""
RETURN = """
role:
description: The role object.
returned: success
type: dict
sample:
{
"CreateIndex": 39,
"Description": "",
"Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
"ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
"ModifyIndex": 39,
"Name": "foo-role",
"Policies": [
{"ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", "Name": "foo-access"}
]
}
operation:
description: The operation performed on the role.
returned: changed
type: str
sample: update
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
OPERATION_READ,
_ConsulModule,
)
class ConsulRoleModule(_ConsulModule):
api_endpoint = "acl/role"
result_key = "role"
unique_identifiers = ["id"]
def endpoint_url(self, operation, identifier=None):
if operation == OPERATION_READ:
return [self.api_endpoint, "name", self.params["name"]]
return super(ConsulRoleModule, self).endpoint_url(operation, identifier)
NAME_ID_SPEC = dict(
name=dict(type="str"),
id=dict(type="str"),
)
NODE_ID_SPEC = dict(
node_name=dict(type="str", required=True, aliases=["name"]),
datacenter=dict(type="str", required=True),
)
SERVICE_ID_SPEC = dict(
service_name=dict(type="str", required=True, aliases=["name"]),
datacenters=dict(type="list", elements="str"),
)
TEMPLATE_POLICY_SPEC = dict(
template_name=dict(type="str", required=True),
template_variables=dict(type="dict"),
)
_ARGUMENT_SPEC = {
"name": dict(type="str", required=True),
"description": dict(type="str"),
"policies": dict(
type="list",
elements="dict",
options=NAME_ID_SPEC,
mutually_exclusive=[("name", "id")],
required_one_of=[("name", "id")],
),
"templated_policies": dict(
type="list",
elements="dict",
options=TEMPLATE_POLICY_SPEC,
),
"node_identities": dict(
type="list",
elements="dict",
options=NODE_ID_SPEC,
),
"service_identities": dict(
type="list",
elements="dict",
options=SERVICE_ID_SPEC,
),
"state": dict(default="present", choices=["present", "absent"]),
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
supports_check_mode=True,
)
consul_module = ConsulRoleModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,309 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: consul_session
short_description: Manipulate consul sessions
description:
- Allows the addition, modification and deletion of sessions in a consul
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In-depth documentation for working with
sessions can be found at U(https://www.consul.io/docs/internals/sessions.html).
author:
- Steve Gargan (@sgargan)
- Håkon Lerring (@Hakon)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.actiongroup_consul
- community.general.consul.token
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
action_group:
version_added: 8.3.0
options:
id:
description:
- ID of the session, required when O(state) is either V(info) or
V(absent).
type: str
state:
description:
- Whether the session should be present, that is, created if it does not
exist, or absent, removed if present. If created, the O(id) for the
session is returned in the output. If V(absent), O(id) is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
specifying V(info), V(node) or V(list) for the O(state); for V(node)
or V(info), the node O(name) or session O(id) is required as parameter.
choices: [ absent, info, list, node, present ]
type: str
default: present
name:
description:
- The name that should be associated with the session. Required when
O(state=node) is used.
type: str
delay:
description:
- The optional lock delay that can be attached to the session when it
is created. Locks for invalidated sessions are blocked from being
acquired until this delay has expired. Durations are in seconds.
type: int
default: 15
node:
description:
- The name of the node with which the session will be associated.
By default this is the name of the agent.
type: str
datacenter:
description:
- The name of the datacenter in which the session exists or should be
created.
type: str
checks:
description:
- Checks that will be used to verify the session health. If
all the checks fail, the session will be invalidated and any locks
associated with the session will be released and can be acquired once
the associated lock delay has expired.
type: list
elements: str
behavior:
description:
- The optional behavior that can be attached to the session when it
is created. This controls the behavior when a session is invalidated.
choices: [ delete, release ]
type: str
default: release
ttl:
description:
- Specifies the duration of a session in seconds (between 10 and 86400).
type: int
version_added: 5.4.0
token:
version_added: 5.6.0
'''
EXAMPLES = '''
- name: Register basic session with consul
community.general.consul_session:
name: session1
- name: Register a session with an existing check
community.general.consul_session:
name: session_with_check
checks:
- existing_check_name
- name: Register a session with lock_delay
community.general.consul_session:
name: session_with_delay
delay: 20
- name: Retrieve info about session by id
community.general.consul_session:
id: session_id
state: info
- name: Retrieve active sessions
community.general.consul_session:
state: list
- name: Register session with a ttl
community.general.consul_session:
name: session-with-ttl
ttl: 600 # sec
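# Illustrative: removing a session requires its id, per the option docs above.
- name: Remove a session
  community.general.consul_session:
    id: session_id
    state: absent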
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC, _ConsulModule
)
def execute(module, consul_module):
state = module.params.get('state')
if state in ['info', 'list', 'node']:
lookup_sessions(module, consul_module)
elif state == 'present':
update_session(module, consul_module)
else:
remove_session(module, consul_module)
def list_sessions(consul_module, datacenter):
return consul_module.get(
'session/list',
params={'dc': datacenter})
def list_sessions_for_node(consul_module, node, datacenter):
return consul_module.get(
('session', 'node', node),
params={'dc': datacenter})
def get_session_info(consul_module, session_id, datacenter):
return consul_module.get(
('session', 'info', session_id),
params={'dc': datacenter})
def lookup_sessions(module, consul_module):
datacenter = module.params.get('datacenter')
state = module.params.get('state')
try:
if state == 'list':
sessions_list = list_sessions(consul_module, datacenter)
# Ditch the index, this can be grabbed from the results
if sessions_list and len(sessions_list) >= 2:
sessions_list = sessions_list[1]
module.exit_json(changed=True,
sessions=sessions_list)
elif state == 'node':
node = module.params.get('node')
sessions = list_sessions_for_node(consul_module, node, datacenter)
module.exit_json(changed=True,
node=node,
sessions=sessions)
elif state == 'info':
session_id = module.params.get('id')
session_by_id = get_session_info(consul_module, session_id, datacenter)
module.exit_json(changed=True,
session_id=session_id,
sessions=session_by_id)
except Exception as e:
module.fail_json(msg="Could not retrieve session info %s" % e)
def create_session(consul_module, name, behavior, ttl, node,
lock_delay, datacenter, checks):
create_data = {
"LockDelay": lock_delay,
"Node": node,
"Name": name,
"Checks": checks,
"Behavior": behavior,
}
if ttl is not None:
create_data["TTL"] = "%ss" % str(ttl) # TTL is in seconds
create_session_response_dict = consul_module.put(
'session/create',
params={
'dc': datacenter},
data=create_data)
return create_session_response_dict["ID"]
def update_session(module, consul_module):
name = module.params.get('name')
delay = module.params.get('delay')
checks = module.params.get('checks')
datacenter = module.params.get('datacenter')
node = module.params.get('node')
behavior = module.params.get('behavior')
ttl = module.params.get('ttl')
try:
session = create_session(consul_module,
name=name,
behavior=behavior,
ttl=ttl,
node=node,
lock_delay=delay,
datacenter=datacenter,
checks=checks
)
module.exit_json(changed=True,
session_id=session,
name=name,
behavior=behavior,
ttl=ttl,
delay=delay,
checks=checks,
node=node)
except Exception as e:
module.fail_json(msg="Could not create/update session %s" % e)
def destroy_session(consul_module, session_id):
return consul_module.put(('session', 'destroy', session_id))
def remove_session(module, consul_module):
session_id = module.params.get('id')
try:
destroy_session(consul_module, session_id)
module.exit_json(changed=True,
session_id=session_id)
except Exception as e:
module.fail_json(msg="Could not remove session with id '%s' %s" % (
session_id, e))
def main():
argument_spec = dict(
checks=dict(type='list', elements='str'),
delay=dict(type='int', default=15),
behavior=dict(
type='str',
default='release',
choices=[
'release',
'delete']),
ttl=dict(type='int'),
id=dict(type='str'),
name=dict(type='str'),
node=dict(type='str'),
state=dict(
type='str',
default='present',
choices=[
'absent',
'info',
'list',
'node',
'present']),
datacenter=dict(type='str'),
**AUTH_ARGUMENTS_SPEC
)
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'node', ['name']),
('state', 'info', ['id']),
('state', 'absent', ['id']),
],
supports_check_mode=False
)
consul_module = _ConsulModule(module)
try:
execute(module, consul_module)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()

View File

@ -0,0 +1,333 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024, Florian Apolloner (@apollo13)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: consul_token
short_description: Manipulate Consul tokens
version_added: 8.3.0
description:
- Allows the addition, modification and deletion of tokens in a consul
cluster via the agent. For more details on using and configuring ACLs,
see U(https://www.consul.io/docs/guides/acl.html).
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
- community.general.consul
- community.general.consul.token
- community.general.consul.actiongroup_consul
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: partial
details:
- In check mode the diff will miss operational attributes.
action_group:
version_added: 8.3.0
options:
state:
description:
- Whether the token should be present or absent.
choices: ['present', 'absent']
default: present
type: str
accessor_id:
description:
- Specifies a UUID to use as the token's Accessor ID.
If not specified, a UUID will be generated for this field.
type: str
secret_id:
description:
- Specifies a UUID to use as the token's Secret ID.
If not specified, a UUID will be generated for this field.
type: str
description:
description:
- Free form human readable description of the token.
type: str
policies:
type: list
elements: dict
description:
- List of policies to attach to the token. Each policy is a dict.
- If the parameter is left blank, any policies currently assigned will not be changed.
- An empty array (V([])) will clear any policies previously set.
suboptions:
name:
description:
- The name of the policy to attach to this token; see M(community.general.consul_policy) for more info.
- Either this or O(policies[].id) must be specified.
type: str
id:
description:
- The ID of the policy to attach to this token; see M(community.general.consul_policy) for more info.
- Either this or O(policies[].name) must be specified.
type: str
roles:
type: list
elements: dict
description:
- List of roles to attach to the token. Each role is a dict.
- If the parameter is left blank, any roles currently assigned will not be changed.
- An empty array (V([])) will clear any roles previously set.
suboptions:
name:
description:
- The name of the role to attach to this token; see M(community.general.consul_role) for more info.
- Either this or O(roles[].id) must be specified.
type: str
id:
description:
- The ID of the role to attach to this token; see M(community.general.consul_role) for more info.
- Either this or O(roles[].name) must be specified.
type: str
templated_policies:
description:
- The list of templated policies that should be applied to the role.
type: list
elements: dict
suboptions:
template_name:
description:
- The templated policy name.
type: str
required: true
template_variables:
description:
- The templated policy variables.
- Not all templated policies require variables.
type: dict
service_identities:
type: list
elements: dict
description:
- List of service identities to attach to the token.
- If not specified, any service identities currently assigned will not be changed.
- If the parameter is an empty array (V([])), any service identities assigned will be unassigned.
suboptions:
service_name:
description:
- The name of the service.
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
- May only contain lowercase alphanumeric characters as well as V(-) and V(_).
type: str
required: true
datacenters:
description:
- The datacenters in which the token will be effective.
- If an empty array (V([])) is specified, the token will be valid in all
datacenters, including those which do not yet exist but may in the future.
type: list
elements: str
node_identities:
type: list
elements: dict
description:
- List of node identities to attach to the token.
- If not specified, any node identities currently assigned will not be changed.
- If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
suboptions:
node_name:
description:
- The name of the node.
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
- May only contain lowercase alphanumeric characters as well as V(-) and V(_).
type: str
required: true
datacenter:
description:
- The node's datacenter.
- This will result in effective token only being valid in this datacenter.
type: str
required: true
local:
description:
- If true, indicates that the token should not be replicated globally
and instead be local to the current datacenter.
type: bool
expiration_ttl:
description:
- This is a convenience field and if set will initialize the C(expiration_time).
Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes,
respectively). Ignored when the token is updated!
type: str
"""
EXAMPLES = """
- name: Create / Update a token by accessor_id
community.general.consul_token:
state: present
accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21
token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8
roles:
- name: role1
- name: role2
service_identities:
- service_name: service1
datacenters: [dc1, dc2]
node_identities:
- node_name: node1
datacenter: dc1
expiration_ttl: 50m
- name: Delete a token
community.general.consul_token:
state: absent
accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21
token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8
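# Illustrative values: a datacenter-local token attached to an existing policy
# by name; the policy is assumed to exist already.
- name: Create a local token with a policy
  community.general.consul_token:
    state: present
    description: Read-only token for dc1
    local: true
    policies:
      - name: foo-access
    token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8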
"""
RETURN = """
token:
description: The token as returned by the consul HTTP API.
returned: always
type: dict
sample:
AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21
CreateIndex: 632
CreateTime: "2024-01-14T21:53:01.402749174+01:00"
Description: Testing
Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
Local: false
ModifyIndex: 633
SecretID: bd380fba-da17-7cee-8576-8d6427c6c930
ServiceIdentities: [{"ServiceName": "test"}]
operation:
description: The operation performed.
returned: changed
type: str
sample: update
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
AUTH_ARGUMENTS_SPEC,
_ConsulModule,
)
def normalize_link_obj(api_obj, module_obj, key):
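# Roles and policies may be referenced by name or by ID; fill in whichever
# half is missing from the module arguments so that the comparison done in
# needs_update() lines up with what the API returns.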
api_objs = api_obj.get(key)
module_objs = module_obj.get(key)
if api_objs is None or module_objs is None:
return
name_to_id = {i["Name"]: i["ID"] for i in api_objs}
id_to_name = {i["ID"]: i["Name"] for i in api_objs}
for obj in module_objs:
identifier = obj.get("ID")
name = obj.get("Name)")
if identifier and not name and identifier in id_to_name:
obj["Name"] = id_to_name[identifier]
if not identifier and name and name in name_to_id:
obj["ID"] = name_to_id[name]
class ConsulTokenModule(_ConsulModule):
api_endpoint = "acl/token"
result_key = "token"
unique_identifiers = ["accessor_id"]
create_only_fields = {"expiration_ttl"}
def read_object(self):
# if `accessor_id` is not supplied we can only create objects and are not idempotent
if not self.id_from_obj(self.params):
return None
return super(ConsulTokenModule, self).read_object()
def needs_update(self, api_obj, module_obj):
# SecretID is usually not supplied
if "SecretID" not in module_obj and "SecretID" in api_obj:
del api_obj["SecretID"]
normalize_link_obj(api_obj, module_obj, "Roles")
normalize_link_obj(api_obj, module_obj, "Policies")
# ExpirationTTL is only supported on create, not for update
# it writes to ExpirationTime, so we need to remove that as well
if "ExpirationTTL" in module_obj:
del module_obj["ExpirationTTL"]
return super(ConsulTokenModule, self).needs_update(api_obj, module_obj)
NAME_ID_SPEC = dict(
name=dict(type="str"),
id=dict(type="str"),
)
NODE_ID_SPEC = dict(
node_name=dict(type="str", required=True),
datacenter=dict(type="str", required=True),
)
SERVICE_ID_SPEC = dict(
service_name=dict(type="str", required=True),
datacenters=dict(type="list", elements="str"),
)
TEMPLATE_POLICY_SPEC = dict(
template_name=dict(type="str", required=True),
template_variables=dict(type="dict"),
)
_ARGUMENT_SPEC = {
"description": dict(),
"accessor_id": dict(),
"secret_id": dict(no_log=True),
"roles": dict(
type="list",
elements="dict",
options=NAME_ID_SPEC,
mutually_exclusive=[("name", "id")],
required_one_of=[("name", "id")],
),
"policies": dict(
type="list",
elements="dict",
options=NAME_ID_SPEC,
mutually_exclusive=[("name", "id")],
required_one_of=[("name", "id")],
),
"templated_policies": dict(
type="list",
elements="dict",
options=TEMPLATE_POLICY_SPEC,
),
"node_identities": dict(
type="list",
elements="dict",
options=NODE_ID_SPEC,
),
"service_identities": dict(
type="list",
elements="dict",
options=SERVICE_ID_SPEC,
),
"local": dict(type="bool"),
"expiration_ttl": dict(type="str"),
"state": dict(default="present", choices=["present", "absent"]),
}
_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
def main():
module = AnsibleModule(
_ARGUMENT_SPEC,
required_if=[("state", "absent", ["accessor_id"])],
supports_check_mode=True,
)
consul_module = ConsulTokenModule(module)
consul_module.execute()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,516 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Silvie Chlupova <schlupov@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: copr
short_description: Manage one of the Copr repositories
version_added: 2.0.0
description: This module can enable, disable or remove the specified repository.
author: Silvie Chlupova (@schlupov) <schlupov@redhat.com>
requirements:
- dnf
- dnf-plugins-core
notes:
- Supports C(check_mode).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
host:
description: The Copr host to work with.
default: copr.fedorainfracloud.org
type: str
protocol:
description: This indicate which protocol to use with the host.
default: https
type: str
name:
description: Copr directory name, for example C(@copr/copr-dev).
required: true
type: str
state:
description:
- Whether to set this project as V(enabled), V(disabled), or V(absent).
default: enabled
type: str
choices: [absent, enabled, disabled]
chroot:
description:
- The name of the chroot that you want to enable/disable/remove in the project,
for example V(epel-7-x86_64). Default chroot is determined by the operating system,
version of the operating system, and architecture on which the module is run.
type: str
"""
EXAMPLES = r"""
- name: Enable project Test of the user schlupov
community.general.copr:
host: copr.fedorainfracloud.org
state: enabled
name: schlupov/Test
chroot: fedora-31-x86_64
- name: Remove project integration_tests of the group copr
community.general.copr:
state: absent
name: '@copr/integration_tests'
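# Illustrative: disabling keeps the repo file in place but sets enabled=0 in it.
- name: Disable project Test of the user schlupov
  community.general.copr:
    state: disabled
    name: schlupov/Test
    chroot: fedora-31-x86_64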
"""
RETURN = r"""
repo_filename:
description: The name of the repo file in which the copr project information is stored.
returned: success
type: str
sample: _copr:copr.fedorainfracloud.org:group_copr:integration_tests.repo
repo:
description: Path to the project on the host.
returned: success
type: str
sample: copr.fedorainfracloud.org/group_copr/integration_tests
"""
import stat
import os
import traceback
try:
import dnf
import dnf.cli
import dnf.repodict
from dnf.conf import Conf
HAS_DNF_PACKAGES = True
DNF_IMP_ERR = None
except ImportError:
DNF_IMP_ERR = traceback.format_exc()
HAS_DNF_PACKAGES = False
from ansible.module_utils.common import respawn
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils import distro
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import open_url
def _respawn_dnf():
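# Ansible may be running under an interpreter without the dnf bindings (for
# example inside a virtualenv); re-execute this module under a system
# interpreter that can import dnf.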
if respawn.has_respawned():
return
system_interpreters = (
"/usr/libexec/platform-python",
"/usr/bin/python3",
"/usr/bin/python2",
"/usr/bin/python",
)
interpreter = respawn.probe_interpreters_for_module(system_interpreters, "dnf")
if interpreter:
respawn.respawn_module(interpreter)
class CoprModule(object):
"""The class represents a copr module.
The class contains methods that take care of the repository state of a project,
whether the project is enabled, disabled or missing.
"""
ansible_module = None
def __init__(self, host, name, state, protocol, chroot=None, check_mode=False):
self.host = host
self.name = name
self.state = state
self.chroot = chroot
self.protocol = protocol
self.check_mode = check_mode
if not chroot:
self.chroot = self.chroot_conf()
else:
self.chroot = chroot
self.get_base()
@property
def short_chroot(self):
"""str: Chroot (distribution-version-architecture) shorten to distribution-version."""
return self.chroot.rsplit('-', 1)[0]
@property
def arch(self):
"""str: Target architecture."""
chroot_parts = self.chroot.split("-")
return chroot_parts[-1]
@property
def user(self):
"""str: Copr user (this can also be the name of the group)."""
return self._sanitize_username(self.name.split("/")[0])
@property
def project(self):
"""str: The name of the copr project."""
return self.name.split("/")[1]
@classmethod
def need_root(cls):
"""Check if the module was run as root."""
if os.geteuid() != 0:
cls.raise_exception("This command has to be run under the root user.")
@classmethod
def get_base(cls):
"""Initialize the configuration from dnf.
Returns:
An instance of the BaseCli class.
"""
cls.base = dnf.cli.cli.BaseCli(Conf())
return cls.base
@classmethod
def raise_exception(cls, msg):
"""Raise either an ansible exception or a python exception.
Args:
msg: The message to be displayed when an exception is thrown.
"""
if cls.ansible_module:
cls.ansible_module.fail_json(msg=msg, changed=False)
raise Exception(msg)
def _get(self, chroot):
"""Send a get request to the server to obtain the necessary data.
Args:
chroot: Chroot in the form of distribution-version.
Returns:
Info about a repository and status code of the get request.
"""
repo_info = None
url = "{0}://{1}/coprs/{2}/repo/{3}/dnf.repo?arch={4}".format(
self.protocol, self.host, self.name, chroot, self.arch
)
try:
r = open_url(url)
status_code = r.getcode()
repo_info = r.read().decode("utf-8")
except HTTPError as e:
status_code = e.getcode()
return repo_info, status_code
def _download_repo_info(self):
"""Download information about the repository.
Returns:
Information about the repository.
"""
distribution, version = self.short_chroot.split('-', 1)
chroot = self.short_chroot
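# Walk a compatibility chain (rhel -> centos-stream -> epel) until the
# Copr server returns repo data for one of the chroots.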
while True:
repo_info, status_code = self._get(chroot)
if repo_info:
return repo_info
if distribution == "rhel":
chroot = "centos-stream-8"
distribution = "centos"
elif distribution == "centos":
if version == "stream-8":
version = "8"
elif version == "stream-9":
version = "9"
chroot = "epel-{0}".format(version)
distribution = "epel"
else:
if str(status_code) != "404":
self.raise_exception(
"This repository does not have any builds yet so you cannot enable it now."
)
else:
self.raise_exception(
"Chroot {0} does not exist in {1}".format(self.chroot, self.name)
)
def _enable_repo(self, repo_filename_path, repo_content=None):
"""Write information to a repo file.
Args:
repo_filename_path: Path to repository.
repo_content: Repository information from the host.
Returns:
True, if the information in the repo file matches that stored on the host,
False otherwise.
"""
if not repo_content:
repo_content = self._download_repo_info()
if self._compare_repo_content(repo_filename_path, repo_content):
return False
if not self.check_mode:
with open(repo_filename_path, "w+") as file:
file.write(repo_content)
os.chmod(
repo_filename_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH,
)
return True
def _get_repo_with_old_id(self):
"""Try to get a repository with the old name."""
repo_id = "{0}-{1}".format(self.user, self.project)
if repo_id in self.base.repos and "_copr" in self.base.repos[repo_id].repofile:
file_name = self.base.repos[repo_id].repofile.split("/")[-1]
try:
copr_hostname = file_name.rsplit(":", 2)[0].split(":", 1)[1]
if copr_hostname != self.host:
return None
return file_name
except IndexError:
return file_name
return None
def _read_all_repos(self, repo_id=None):
"""The method is used to initialize the base variable by
repositories using the RepoReader class from dnf.
Args:
repo_id: Repo id of the repository we want to work with.
"""
reader = dnf.conf.read.RepoReader(self.base.conf, None)
for repo in reader:
try:
if repo_id:
if repo.id == repo_id:
self.base.repos.add(repo)
break
else:
self.base.repos.add(repo)
except dnf.exceptions.ConfigError as e:
self.raise_exception(str(e))
def _get_copr_repo(self):
"""Return one specific repository from all repositories on the system.
Returns:
The repository that a user wants to enable, disable, or remove.
"""
repo_id = "copr:{0}:{1}:{2}".format(self.host, self.user, self.project)
if repo_id not in self.base.repos:
if self._get_repo_with_old_id() is None:
return None
return self.base.repos[repo_id]
def _disable_repo(self, repo_filename_path):
"""Disable the repository.
Args:
repo_filename_path: Path to repository.
Returns:
False, if the repository is already disabled on the system,
True otherwise.
"""
self._read_all_repos()
repo = self._get_copr_repo()
if repo is None:
if self.check_mode:
return True
self._enable_repo(repo_filename_path)
self._read_all_repos("copr:{0}:{1}:{2}".format(self.host, self.user, self.project))
repo = self._get_copr_repo()
for repo_id in repo.cfg.sections():
repo_content_api = self._download_repo_info()
with open(repo_filename_path, "r") as file:
repo_content_file = file.read()
if repo_content_file != repo_content_api:
if not self.resolve_differences(
repo_content_file, repo_content_api, repo_filename_path
):
return False
if not self.check_mode:
self.base.conf.write_raw_configfile(
repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"},
)
return True
def resolve_differences(self, repo_content_file, repo_content_api, repo_filename_path):
"""Detect differences between the contents of the repository stored on the
system and the information about the repository on the server.
Args:
repo_content_file: The contents of the repository stored on the system.
repo_content_api: The information about the repository from the server.
repo_filename_path: Path to repository.
Returns:
False, if the contents of the repo file and the information on the server match,
True otherwise.
"""
repo_file_lines = repo_content_file.split("\n")
repo_api_lines = repo_content_api.split("\n")
repo_api_lines.remove("enabled=1")
if "enabled=0" in repo_file_lines:
repo_file_lines.remove("enabled=0")
if " ".join(repo_api_lines) == " ".join(repo_file_lines):
return False
if not self.check_mode:
os.remove(repo_filename_path)
self._enable_repo(repo_filename_path, repo_content_api)
else:
repo_file_lines.remove("enabled=1")
if " ".join(repo_api_lines) != " ".join(repo_file_lines):
if not self.check_mode:
os.remove(repo_filename_path)
self._enable_repo(repo_filename_path, repo_content_api)
return True
def _remove_repo(self):
"""Remove the required repository.
Returns:
True, if the repository has been removed, False otherwise.
"""
self._read_all_repos()
repo = self._get_copr_repo()
if not repo:
return False
if not self.check_mode:
try:
os.remove(repo.repofile)
except OSError as e:
self.raise_exception(str(e))
return True
def run(self):
"""The method uses methods of the CoprModule class to change the state of the repository.
Returns:
Dictionary with information that the ansible module displays to the user at the end of the run.
"""
self.need_root()
state = dict()
repo_filename = "_copr:{0}:{1}:{2}.repo".format(self.host, self.user, self.project)
state["repo"] = "{0}/{1}/{2}".format(self.host, self.user, self.project)
state["repo_filename"] = repo_filename
repo_filename_path = "{0}/_copr:{1}:{2}:{3}.repo".format(
self.base.conf.get_reposdir, self.host, self.user, self.project
)
if self.state == "enabled":
enabled = self._enable_repo(repo_filename_path)
state["msg"] = "enabled"
state["state"] = bool(enabled)
elif self.state == "disabled":
disabled = self._disable_repo(repo_filename_path)
state["msg"] = "disabled"
state["state"] = bool(disabled)
elif self.state == "absent":
removed = self._remove_repo()
state["msg"] = "absent"
state["state"] = bool(removed)
return state
@staticmethod
def _compare_repo_content(repo_filename_path, repo_content_api):
"""Compare the contents of the stored repository with the information from the server.
Args:
repo_filename_path: Path to repository.
repo_content_api: The information about the repository from the server.
Returns:
True, if the information matches, False otherwise.
"""
if not os.path.isfile(repo_filename_path):
return False
with open(repo_filename_path, "r") as file:
repo_content_file = file.read()
return repo_content_file == repo_content_api
@staticmethod
def chroot_conf():
"""Obtain information about the distribution, version, and architecture of the target.
Returns:
Chroot info in the form of distribution-version-architecture.
"""
(distribution, version, codename) = distro.linux_distribution(full_distribution_name=False)
base = CoprModule.get_base()
return "{0}-{1}-{2}".format(distribution, version, base.conf.arch)
@staticmethod
def _sanitize_username(user):
"""Modify the group name.
Args:
user: User name.
Returns:
Modified user name if it is a group name with @.
"""
if user[0] == "@":
return "group_{0}".format(user[1:])
return user
def run_module():
"""The function takes care of the functioning of the whole ansible copr module."""
module_args = dict(
host=dict(type="str", default="copr.fedorainfracloud.org"),
protocol=dict(type="str", default="https"),
name=dict(type="str", required=True),
state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"),
chroot=dict(type="str"),
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
params = module.params
if not HAS_DNF_PACKAGES:
_respawn_dnf()
module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR)
CoprModule.ansible_module = module
copr_module = CoprModule(
host=params["host"],
name=params["name"],
state=params["state"],
protocol=params["protocol"],
chroot=params["chroot"],
check_mode=module.check_mode,
)
state = copr_module.run()
info = "Please note that this repository is not part of the main distribution"
if params["state"] == "enabled" and state["state"]:
module.exit_json(
changed=state["state"],
msg=state["msg"],
repo=state["repo"],
repo_filename=state["repo_filename"],
info=info,
)
module.exit_json(
changed=state["state"],
msg=state["msg"],
repo=state["repo"],
repo_filename=state["repo_filename"],
)
def main():
"""Launches ansible Copr module."""
run_module()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,244 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Franck Cuny <franck@lumberjaph.net>
# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: cpanm
short_description: Manages Perl library dependencies
description:
- Manage Perl library dependencies using cpanminus.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
type: str
description:
- The Perl library to install. Valid values change according to the O(mode); see the notes for more details.
- Note that for installing from a local path the parameter O(from_path) should be used.
aliases: [pkg]
from_path:
type: path
description:
- The local directory or C(tar.gz) file to install from.
notest:
description:
- Do not run unit tests.
type: bool
default: false
locallib:
description:
- Specify the install base to install modules.
type: path
mirror:
description:
- Specifies the base URL for the CPAN mirror to use.
type: str
mirror_only:
description:
- Use the mirror's index file instead of the CPAN Meta DB.
type: bool
default: false
installdeps:
description:
- Only install dependencies.
type: bool
default: false
version:
description:
- Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
type: str
executable:
description:
- Override the path to the cpanm executable.
type: path
mode:
description:
- Controls the module behavior. See notes below for more details.
- The default changed from V(compatibility) to V(new) in community.general 9.0.0.
type: str
choices: [compatibility, new]
default: new
version_added: 3.0.0
name_check:
description:
- When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified).
type: str
version_added: 3.0.0
notes:
- Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
- "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)."
- >
O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility.
This was the default mode before community.general 9.0.0.
O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version)
when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be a URL or a git URL.
C(cpanm) version specifiers do not work in this mode.
- >
O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file,
an HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized.
This is the default mode from community.general 9.0.0 onwards.
author:
- "Franck Cuny (@fcuny)"
- "Alexei Znamensky (@russoz)"
'''
EXAMPLES = '''
- name: Install Dancer perl package
community.general.cpanm:
name: Dancer
- name: Install version 0.99_05 of the Plack perl package
community.general.cpanm:
name: MIYAGAWA/Plack-0.99_05.tar.gz
- name: Install Dancer into the specified locallib
community.general.cpanm:
name: Dancer
locallib: /srv/webapps/my_app/extlib
- name: Install perl dependencies from local directory
community.general.cpanm:
from_path: /srv/webapps/my_app/src/
- name: Install Dancer perl package without running the unit tests in indicated locallib
community.general.cpanm:
name: Dancer
notest: true
locallib: /srv/webapps/my_app/extlib
- name: Install Dancer perl package from a specific mirror
community.general.cpanm:
name: Dancer
mirror: 'http://cpan.cpantesters.org/'
- name: Install Dancer perl package into the system root path
become: true
community.general.cpanm:
name: Dancer
- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
community.general.cpanm:
name: Dancer
version: '1.0'
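# Illustrative sketch of a cpanm version specifier in the default new mode:
# a leading '@' pins an exact version (the value below is an example only).
- name: Install exactly version 1.0 of Dancer
  community.general.cpanm:
    name: Dancer
    version: '@1.0'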
'''
import os
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
class CPANMinus(ModuleHelper):
output_params = ['name', 'version']
module = dict(
argument_spec=dict(
name=dict(type='str', aliases=['pkg']),
version=dict(type='str'),
from_path=dict(type='path'),
notest=dict(type='bool', default=False),
locallib=dict(type='path'),
mirror=dict(type='str'),
mirror_only=dict(type='bool', default=False),
installdeps=dict(type='bool', default=False),
executable=dict(type='path'),
mode=dict(type='str', default='new', choices=['compatibility', 'new']),
name_check=dict(type='str')
),
required_one_of=[('name', 'from_path')],
)
command = 'cpanm'
command_args_formats = dict(
notest=cmd_runner_fmt.as_bool("--notest"),
locallib=cmd_runner_fmt.as_opt_val('--local-lib'),
mirror=cmd_runner_fmt.as_opt_val('--mirror'),
mirror_only=cmd_runner_fmt.as_bool("--mirror-only"),
installdeps=cmd_runner_fmt.as_bool("--installdeps"),
pkg_spec=cmd_runner_fmt.as_list(),
)
use_old_vardict = False
def __init_module__(self):
v = self.vars
if v.mode == "compatibility":
if v.name_check:
self.do_raise("Parameter name_check can only be used with mode=new")
else:
if v.name and v.from_path:
self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")
self.command = v.executable if v.executable else self.command
self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
self.vars.binary = self.runner.binary
def _is_package_installed(self, name, locallib, version):
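# Probe with `perl -le 'use <name> <version>;'` (PERL5LIB pointed at the
# locallib when given): exit status 0 means the module is already installed.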
def process(rc, out, err):
return rc == 0
if name is None or name.endswith('.tar.gz'):
return False
version = "" if version is None else " " + version
env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {}
runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env)
with runner("mod", output_process=process) as ctx:
return ctx.run(mod='use %s%s;' % (name, version))
def sanitize_pkg_spec_version(self, pkg_spec, version):
if version is None:
return pkg_spec
if pkg_spec.endswith('.tar.gz'):
self.do_raise(msg="parameter 'version' must not be used when installing from a file")
if os.path.isdir(pkg_spec):
self.do_raise(msg="parameter 'version' must not be used when installing from a directory")
if pkg_spec.endswith('.git'):
if version.startswith('~'):
self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository")
version = version if version.startswith('@') else '@' + version
elif version[0] not in ('@', '~'):
version = '~' + version
return pkg_spec + version
def __run__(self):
def process(rc, out, err):
if self.vars.mode == "compatibility" and rc != 0:
self.do_raise(msg=err, cmd=self.vars.cmd_args)
return 'is up to date' not in err and 'is up to date' not in out
v = self.vars
pkg_param = 'from_path' if v.from_path else 'name'
if v.mode == 'compatibility':
if self._is_package_installed(v.name, v.locallib, v.version):
return
pkg_spec = v[pkg_param]
else:
installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False
if installed:
return
pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
with self.runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
self.changed = ctx.run(pkg_spec=pkg_spec)
def main():
CPANMinus.execute()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,432 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Cronvar Plugin: The goal of this plugin is to provide an idempotent
# method for setting cron variable values. It should play well with the
# existing cron module as well as allow for manually added variables.
# Each variable entered will be preceded with a comment describing the
# variable so that it can be found later. This is required to be
# present in order for this plugin to find/modify the variable.
# This module is based on the crontab module.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cronvar
short_description: Manage variables in crontabs
description:
- Use this module to manage crontab variables.
- This module allows you to create, update, or delete cron variable definitions.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- Name of the crontab variable.
type: str
required: true
value:
description:
- The value to set this variable to.
- Required if O(state=present).
type: str
insertafter:
description:
- If specified, the variable will be inserted after the variable specified.
- Used with O(state=present).
type: str
insertbefore:
description:
- Used with O(state=present). If specified, the variable will be inserted
just before the variable specified.
type: str
state:
description:
- Whether to ensure that the variable is present or absent.
type: str
choices: [ absent, present ]
default: present
user:
description:
- The specific user whose crontab should be modified.
- This parameter defaults to V(root) when unset.
type: str
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- Without a leading V(/), this is assumed to be in C(/etc/cron.d).
- With a leading V(/), this is taken as absolute.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup) variable by this module.
# TODO: C() above should be RV(), but return values have not been documented!
type: bool
default: false
requirements:
- cron
author:
- Doug Luce (@dougluce)
'''
EXAMPLES = r'''
- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
community.general.cronvar:
name: EMAIL
value: doug@ansibmod.con.com
- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
community.general.cronvar:
name: LEGACY
state: absent
- name: Add a variable to a file under /etc/cron.d
community.general.cronvar:
name: LOGFILE
value: /var/log/yum-autoupdate.log
user: root
cron_file: ansible_yum-autoupdate
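# Illustrative: insertafter is a documented option; the variable names here
# are examples only.
- name: Ensure MAILTO is defined directly after the EMAIL variable
  community.general.cronvar:
    name: MAILTO
    value: doug@ansibmod.con.com
    insertafter: EMAIL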
'''
import os
import platform
import pwd
import re
import shlex
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
class CronVarError(Exception):
pass
class CronVar(object):
"""
CronVar object to write variables to crontabs.
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.lines = None
self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
self.cron_cmd = self.module.get_bin_path('crontab', required=True)
if cron_file:
self.cron_file = ""
if os.path.isabs(cron_file):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.lines = f.read().splitlines()
f.close()
except IOError:
# cron file does not exist
return
except Exception:
raise CronVarError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronVarError("Unable to read crontab")
lines = out.splitlines()
count = 0
for l in lines:
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match(r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match(r'# \(.*version.*\)', l)):
self.lines.append(l)
count += 1
def log_message(self, message):
self.module.debug('ansible: "%s"' % message)
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
def remove_variable_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except Exception:
raise CronVarError("Unexpected error:", sys.exc_info()[0])
def parse_for_var(self, line):
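        # A crontab variable line has the form NAME=value. Everything except
        # '=' and quote characters counts as a word character (see
        # self.wordchars), so the name and the '=' come back as separate tokens.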
lexer = shlex.shlex(line)
lexer.wordchars = self.wordchars
varname = lexer.get_token()
is_env_var = lexer.get_token() == '='
value = ''.join(lexer)
if is_env_var:
return (varname, value)
raise CronVarError("Not a variable.")
def find_variable(self, name):
for l in self.lines:
try:
(varname, value) = self.parse_for_var(l)
if varname == name:
return value
except CronVarError:
pass
return None
def get_var_names(self):
var_names = []
for l in self.lines:
try:
var_name, dummy = self.parse_for_var(l)
var_names.append(var_name)
except CronVarError:
pass
return var_names
def add_variable(self, name, value, insertbefore, insertafter):
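        # Note: when insertbefore/insertafter is given but no variable with
        # that name exists, the lines are rebuilt unchanged and the new
        # variable is silently not added.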
if insertbefore is None and insertafter is None:
# Add the variable to the top of the file.
self.lines.insert(0, "%s=%s" % (name, value))
else:
newlines = []
for l in self.lines:
try:
varname, dummy = self.parse_for_var(l) # Throws if not a var line
if varname == insertbefore:
newlines.append("%s=%s" % (name, value))
newlines.append(l)
elif varname == insertafter:
newlines.append(l)
newlines.append("%s=%s" % (name, value))
else:
raise CronVarError # Append.
except CronVarError:
newlines.append(l)
self.lines = newlines
def remove_variable(self, name):
self.update_variable(name, None, remove=True)
def update_variable(self, name, value, remove=False):
newlines = []
for l in self.lines:
try:
varname, dummy = self.parse_for_var(l) # Throws if not a var line
if varname != name:
raise CronVarError # Append.
if not remove:
newlines.append("%s=%s" % (name, value))
except CronVarError:
newlines.append(l)
self.lines = newlines
def render(self):
"""
Render a proper crontab
"""
result = '\n'.join(self.lines)
if result and result[-1] not in ['\n', '\r']:
result += '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
elif platform.system() == 'AIX':
return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (
shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
# ==================================================
def main():
# The following example playbooks:
#
# - community.general.cronvar: name="SHELL" value="/bin/bash"
#
# - name: Set the email
# community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
#
# - name: Get rid of the old new host variable
# community.general.cronvar: name="NEW_HOST" state=absent
#
    # Would produce:
    # SHELL=/bin/bash
    # EMAILTO=doug@ansibmod.con.com
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
value=dict(type='str'),
user=dict(type='str'),
cron_file=dict(type='str'),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
backup=dict(type='bool', default=False),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
supports_check_mode=False,
)
name = module.params['name']
value = module.params['value']
user = module.params['user']
cron_file = module.params['cron_file']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
state = module.params['state']
backup = module.params['backup']
ensure_present = state == 'present'
changed = False
res_args = dict()
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
cronvar = CronVar(module, user, cron_file)
module.debug('cronvar instantiated - name: "%s"' % name)
# --- user input validation ---
if name is None and ensure_present:
module.fail_json(msg="You must specify 'name' to insert a new cron variable")
if value is None and ensure_present:
module.fail_json(msg="You must specify 'value' to insert a new cron variable")
if name is None and not ensure_present:
module.fail_json(msg="You must specify 'name' to remove a cron variable")
# if requested make a backup before making a change
if backup:
dummy, backup_file = tempfile.mkstemp(prefix='cronvar')
cronvar.write(backup_file)
if cronvar.cron_file and not name and not ensure_present:
        changed = cronvar.remove_variable_file()
module.exit_json(changed=changed, cron_file=cron_file, state=state)
old_value = cronvar.find_variable(name)
if ensure_present:
if old_value is None:
cronvar.add_variable(name, value, insertbefore, insertafter)
changed = True
elif old_value != value:
cronvar.update_variable(name, value)
changed = True
else:
if old_value is not None:
cronvar.remove_variable(name)
changed = True
res_args = {
"vars": cronvar.get_var_names(),
"changed": changed
}
if changed:
cronvar.write()
# retain the backup only if crontab or cron file have changed
if backup:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
if __name__ == '__main__':
main()


@ -0,0 +1,362 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Steve <yo@groks.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: crypttab
short_description: Encrypted Linux block devices
description:
- Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
optionally prefixed with V(/dev/mapper/), as it appears in the filesystem. V(/dev/mapper/)
will be stripped from O(name).
type: str
required: true
state:
description:
- Use V(present) to add a line to C(/etc/crypttab) or update its definition
if already present.
- Use V(absent) to remove a line with matching O(name).
- Use V(opts_present) to add options to those already present; options with
different values will be updated.
- Use V(opts_absent) to remove options from the existing set.
type: str
required: true
choices: [ absent, opts_absent, opts_present, present ]
backing_device:
description:
- Path to the underlying block device or file, or the UUID of a block-device
prefixed with V(UUID=).
type: str
password:
description:
- Encryption password, the path to a file containing the password, or
V(-) or unset if the password should be entered at boot.
type: path
opts:
description:
      - A comma-delimited list of options. See C(crypttab(5\)) for details.
type: str
path:
description:
      - Path to file to use instead of C(/etc/crypttab).
- This might be useful in a chroot environment.
type: path
default: /etc/crypttab
author:
- Steve (@groks)
'''
EXAMPLES = r'''
- name: Set the options explicitly for a device that must already exist
community.general.crypttab:
name: luks-home
state: present
opts: discard,cipher=aes-cbc-essiv:sha256
- name: Add the 'discard' option to any existing options for all devices
community.general.crypttab:
name: '{{ item.device }}'
state: opts_present
opts: discard
loop: '{{ ansible_mounts }}'
when: "'/dev/mapper/luks-' in {{ item.device }}"
'''
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
backing_device=dict(type='str'),
password=dict(type='path'),
opts=dict(type='str'),
path=dict(type='path', default='/etc/crypttab')
),
supports_check_mode=True,
)
backing_device = module.params['backing_device']
password = module.params['password']
opts = module.params['opts']
state = module.params['state']
path = module.params['path']
name = module.params['name']
if name.startswith('/dev/mapper/'):
name = name[len('/dev/mapper/'):]
if state != 'absent' and backing_device is None and password is None and opts is None:
module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
**module.params)
if 'opts' in state and (backing_device is not None or password is not None):
module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
**module.params)
for arg_name, arg in (('name', name),
('backing_device', backing_device),
('password', password),
('opts', opts)):
if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
**module.params)
try:
crypttab = Crypttab(path)
existing_line = crypttab.match(name)
except Exception as e:
module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
exception=traceback.format_exc(), **module.params)
if 'present' in state and existing_line is None and backing_device is None:
module.fail_json(msg="'backing_device' required to add a new entry",
**module.params)
changed, reason = False, '?'
if state == 'absent':
if existing_line is not None:
changed, reason = existing_line.remove()
elif state == 'present':
if existing_line is not None:
changed, reason = existing_line.set(backing_device, password, opts)
else:
changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
elif state == 'opts_present':
if existing_line is not None:
changed, reason = existing_line.opts.add(opts)
else:
changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
elif state == 'opts_absent':
if existing_line is not None:
changed, reason = existing_line.opts.remove(opts)
if changed and not module.check_mode:
        with open(path, 'wb') as f:
            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
module.exit_json(changed=changed, msg=reason, **module.params)
class Crypttab(object):
    def __init__(self, path):
        self._lines = []
        self.path = path
if not os.path.exists(path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
open(path, 'a').close()
        with open(path, 'r') as f:
            for line in f.readlines():
                self._lines.append(Line(line))
def add(self, line):
self._lines.append(line)
return True, 'added line'
def lines(self):
for line in self._lines:
if line.valid():
yield line
def match(self, name):
for line in self.lines():
if line.name == name:
return line
return None
def __str__(self):
lines = []
for line in self._lines:
lines.append(str(line))
crypttab = '\n'.join(lines)
if len(crypttab) == 0:
crypttab += '\n'
if crypttab[-1] != '\n':
crypttab += '\n'
return crypttab
class Line(object):
def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
self.line = line
self.name = name
self.backing_device = backing_device
self.password = password
self.opts = Options(opts)
if line is not None:
self.line = self.line.rstrip('\n')
if self._line_valid(line):
self.name, backing_device, password, opts = self._split_line(line)
self.set(backing_device, password, opts)
def set(self, backing_device, password, opts):
changed = False
if backing_device is not None and self.backing_device != backing_device:
self.backing_device = backing_device
changed = True
if password is not None and self.password != password:
self.password = password
changed = True
if opts is not None:
opts = Options(opts)
if opts != self.opts:
self.opts = opts
changed = True
return changed, 'updated line'
def _line_valid(self, line):
if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
return False
return True
def _split_line(self, line):
fields = line.split()
try:
field2 = fields[2]
except IndexError:
field2 = None
try:
field3 = fields[3]
except IndexError:
field3 = None
return (fields[0],
fields[1],
field2,
field3)
def remove(self):
self.line, self.name, self.backing_device = '', None, None
return True, 'removed line'
def valid(self):
if self.name is not None and self.backing_device is not None:
return True
return False
def __str__(self):
if self.valid():
fields = [self.name, self.backing_device]
if self.password is not None or self.opts:
if self.password is not None:
fields.append(self.password)
else:
fields.append('none')
if self.opts:
fields.append(str(self.opts))
return ' '.join(fields)
return self.line
class Options(dict):
"""opts_string looks like: 'discard,foo=bar,baz=greeble' """
def __init__(self, opts_string):
super(Options, self).__init__()
self.itemlist = []
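        # itemlist preserves insertion order so that options are rendered
        # back in the order they appeared; plain dict ordering was not
        # guaranteed on older Python versions.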
if opts_string is not None:
for opt in opts_string.split(','):
                kv = opt.split('=', 1)
if len(kv) > 1:
k, v = (kv[0], kv[1])
else:
k, v = (kv[0], None)
self[k] = v
def add(self, opts_string):
changed = False
for k, v in Options(opts_string).items():
if k in self:
if self[k] != v:
changed = True
else:
changed = True
self[k] = v
return changed, 'updated options'
def remove(self, opts_string):
changed = False
for k in Options(opts_string):
if k in self:
del self[k]
changed = True
return changed, 'removed options'
def keys(self):
return self.itemlist
def values(self):
return [self[key] for key in self]
def items(self):
return [(key, self[key]) for key in self]
def __iter__(self):
return iter(self.itemlist)
def __setitem__(self, key, value):
if key not in self:
self.itemlist.append(key)
super(Options, self).__setitem__(key, value)
def __delitem__(self, key):
self.itemlist.remove(key)
super(Options, self).__delitem__(key)
def __ne__(self, obj):
return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
def __str__(self):
ret = []
for k, v in self.items():
if v is None:
ret.append(k)
else:
ret.append('%s=%s' % (k, v))
return ','.join(ret)
if __name__ == '__main__':
main()


@ -0,0 +1,316 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Datadog, Inc
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: datadog_downtime
short_description: Manages Datadog downtimes
version_added: 2.0.0
description:
- Manages downtimes within Datadog.
- Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/).
author:
- Datadog (@Datadog)
requirements:
- datadog-api-client
- Python 3.6+
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
api_key:
description:
- Your Datadog API key.
required: true
type: str
api_host:
description:
- The URL to the Datadog API.
- This value can also be set with the E(DATADOG_HOST) environment variable.
required: false
default: https://api.datadoghq.com
type: str
app_key:
description:
- Your Datadog app key.
required: true
type: str
state:
description:
- The designated state of the downtime.
required: false
choices: ["present", "absent"]
default: present
type: str
id:
description:
- The identifier of the downtime.
      - If empty, a new downtime gets created, otherwise it is either updated or deleted depending on O(state).
- To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
type: int
monitor_tags:
description:
- A list of monitor tags to which the downtime applies.
- The resulting downtime applies to monitors that match ALL provided monitor tags.
type: list
elements: str
scope:
description:
- A list of scopes to which the downtime applies.
      - The resulting downtime applies to sources that match ALL provided scopes.
type: list
elements: str
monitor_id:
description:
- The ID of the monitor to mute. If not provided, the downtime applies to all monitors.
type: int
downtime_message:
description:
- A message to include with notifications for this downtime.
- Email notifications can be sent to specific users by using the same "@username" notation as events.
type: str
start:
type: int
description:
- POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
end:
type: int
description:
- POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it.
timezone:
description:
- The timezone for the downtime.
type: str
rrule:
description:
- The C(RRULE) standard for defining recurring events.
- For example, to have a recurring event on the first day of each month,
select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1).
- Most common rrule options from the iCalendar Spec are supported.
- Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)).
type: str
"""
EXAMPLES = """
- name: Create a downtime
register: downtime_var
community.general.datadog_downtime:
state: present
monitor_tags:
- "foo:bar"
downtime_message: "Downtime for foo:bar"
scope: "test"
api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    # Look up the id in the file, ignoring errors if the file does not exist, so a new downtime gets created
id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}"
- name: Save downtime id to file for later updates and idempotence
delegate_to: localhost
copy:
content: "{{ downtime.downtime.id }}"
dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}"
"""
RETURN = """
# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
downtime:
description: The downtime returned by the API.
type: dict
returned: always
sample: {
"active": true,
"canceled": null,
"creator_id": 1445416,
"disabled": false,
"downtime_type": 2,
"end": null,
"id": 1055751000,
"message": "Downtime for foo:bar",
"monitor_id": null,
"monitor_tags": [
"foo:bar"
],
"parent_id": null,
"recurrence": null,
"scope": [
"test"
],
"start": 1607015009,
"timezone": "UTC",
"updater_id": null
}
"""
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# Import Datadog
DATADOG_IMP_ERR = None
HAS_DATADOG = True
try:
from datadog_api_client.v1 import Configuration, ApiClient, ApiException
from datadog_api_client.v1.api.downtimes_api import DowntimesApi
from datadog_api_client.v1.model.downtime import Downtime
from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence
except ImportError:
DATADOG_IMP_ERR = traceback.format_exc()
HAS_DATADOG = False
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
api_host=dict(required=False, default="https://api.datadoghq.com"),
app_key=dict(required=True, no_log=True),
state=dict(required=False, choices=["present", "absent"], default="present"),
monitor_tags=dict(required=False, type="list", elements="str"),
scope=dict(required=False, type="list", elements="str"),
monitor_id=dict(required=False, type="int"),
downtime_message=dict(required=False, no_log=True),
start=dict(required=False, type="int"),
end=dict(required=False, type="int"),
timezone=dict(required=False, type="str"),
rrule=dict(required=False, type="str"),
id=dict(required=False, type="int"),
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR)
configuration = Configuration(
host=module.params["api_host"],
api_key={
"apiKeyAuth": module.params["api_key"],
"appKeyAuth": module.params["app_key"]
}
)
with ApiClient(configuration) as api_client:
api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format(
api_client.user_agent
)
api_instance = DowntimesApi(api_client)
# Validate api and app keys
try:
api_instance.list_downtimes(current_only=True)
except ApiException as e:
module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key: {0}".format(e))
if module.params["state"] == "present":
schedule_downtime(module, api_client)
elif module.params["state"] == "absent":
cancel_downtime(module, api_client)
def _get_downtime(module, api_client):
api = DowntimesApi(api_client)
downtime = None
if module.params["id"]:
try:
downtime = api.get_downtime(module.params["id"])
except ApiException as e:
module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e))
return downtime
def build_downtime(module):
downtime = Downtime()
if module.params["monitor_tags"]:
downtime.monitor_tags = module.params["monitor_tags"]
if module.params["scope"]:
downtime.scope = module.params["scope"]
if module.params["monitor_id"]:
downtime.monitor_id = module.params["monitor_id"]
if module.params["downtime_message"]:
downtime.message = module.params["downtime_message"]
if module.params["start"]:
downtime.start = module.params["start"]
if module.params["end"]:
downtime.end = module.params["end"]
if module.params["timezone"]:
downtime.timezone = module.params["timezone"]
if module.params["rrule"]:
downtime.recurrence = DowntimeRecurrence(
rrule=module.params["rrule"],
type="rrule",
)
return downtime
def _post_downtime(module, api_client):
api = DowntimesApi(api_client)
downtime = build_downtime(module)
try:
resp = api.create_downtime(downtime)
module.params["id"] = resp.id
module.exit_json(changed=True, downtime=resp.to_dict())
except ApiException as e:
module.fail_json(msg="Failed to create downtime: {0}".format(e))
def _equal_dicts(a, b, ignore_keys):
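    # Compare two downtime dicts while ignoring server-maintained keys, so an
    # update that changed nothing user-visible reports changed=False.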
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_downtime(module, current_downtime, api_client):
api = DowntimesApi(api_client)
downtime = build_downtime(module)
try:
if current_downtime.disabled:
resp = api.create_downtime(downtime)
else:
resp = api.update_downtime(module.params["id"], downtime)
if _equal_dicts(
resp.to_dict(),
current_downtime.to_dict(),
["active", "creator_id", "updater_id"]
):
module.exit_json(changed=False, downtime=resp.to_dict())
else:
module.exit_json(changed=True, downtime=resp.to_dict())
except ApiException as e:
module.fail_json(msg="Failed to update downtime: {0}".format(e))
def schedule_downtime(module, api_client):
downtime = _get_downtime(module, api_client)
if downtime is None:
_post_downtime(module, api_client)
else:
_update_downtime(module, downtime, api_client)
def cancel_downtime(module, api_client):
downtime = _get_downtime(module, api_client)
api = DowntimesApi(api_client)
if downtime is None:
module.exit_json(changed=False)
try:
api.cancel_downtime(downtime["id"])
except ApiException as e:
module.fail_json(msg="Failed to create downtime: {0}".format(e))
module.exit_json(changed=True)
if __name__ == "__main__":
main()


@ -0,0 +1,193 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
# Author: Naoya Nakazawa <naoya.n@gmail.com>
#
# This module is proudly sponsored by iGeolise (www.igeolise.com) and
# Tiny Lab Productions (www.tinylabproductions.com).
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: datadog_event
short_description: Posts events to Datadog service
description:
- "Allows to post events to Datadog (www.datadoghq.com) service."
- "Uses http://docs.datadoghq.com/api/#events API."
author:
- "Artūras 'arturaz' Šlajus (@arturaz)"
- "Naoya Nakazawa (@n0ts)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
api_key:
type: str
description: ["Your DataDog API key."]
required: true
app_key:
type: str
description: ["Your DataDog app key."]
required: true
title:
type: str
description: ["The event title."]
required: true
text:
type: str
description: ["The body of the event."]
required: true
date_happened:
type: int
description:
- POSIX timestamp of the event.
- Default value is now.
priority:
type: str
description: ["The priority of the event."]
default: normal
choices: [normal, low]
host:
type: str
description:
- Host name to associate with the event.
- If not specified, it defaults to the remote system's hostname.
api_host:
type: str
description:
- DataDog API endpoint URL.
version_added: '3.3.0'
tags:
type: list
elements: str
description: ["Comma separated list of tags to apply to the event."]
alert_type:
type: str
description: ["Type of alert."]
default: info
choices: ['error', 'warning', 'info', 'success']
aggregation_key:
type: str
description: ["An arbitrary string to use for aggregation."]
validate_certs:
description:
- If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
'''
EXAMPLES = '''
- name: Post an event with low priority
community.general.datadog_event:
title: Testing from ansible
text: Test
priority: low
api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
- name: Post an event with several tags
community.general.datadog_event:
title: Testing from ansible
text: Test
api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
tags: 'aa,bb,#host:{{ inventory_hostname }}'
- name: Post an event with several tags to another endpoint
community.general.datadog_event:
title: Testing from ansible
text: Test
api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
api_host: 'https://example.datadoghq.eu'
tags:
- aa
- b
- '#host:{{ inventory_hostname }}'
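
# Illustrative example, not part of the original docs: backdates the event
# and groups related events under one aggregation key. The timestamp is a
# placeholder.
- name: Post a backdated, aggregated event
  community.general.datadog_event:
    title: Deployment finished
    text: Deployed by ansible
    date_happened: 1640000000
    aggregation_key: ansible-deploys
    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN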
'''
import platform
import traceback
# Import Datadog
DATADOG_IMP_ERR = None
try:
from datadog import initialize, api
HAS_DATADOG = True
except Exception:
DATADOG_IMP_ERR = traceback.format_exc()
HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
app_key=dict(required=True, no_log=True),
api_host=dict(type='str'),
title=dict(required=True),
text=dict(required=True),
date_happened=dict(type='int'),
priority=dict(default='normal', choices=['normal', 'low']),
host=dict(),
tags=dict(type='list', elements='str'),
alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']),
aggregation_key=dict(no_log=False),
validate_certs=dict(default=True, type='bool'),
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
options = {
'api_key': module.params['api_key'],
'app_key': module.params['app_key'],
}
if module.params['api_host'] is not None:
options['api_host'] = module.params['api_host']
initialize(**options)
_post_event(module)
def _post_event(module):
try:
if module.params['host'] is None:
module.params['host'] = platform.node().split('.')[0]
msg = api.Event.create(title=module.params['title'],
text=module.params['text'],
host=module.params['host'],
tags=module.params['tags'],
priority=module.params['priority'],
alert_type=module.params['alert_type'],
aggregation_key=module.params['aggregation_key'],
source_type_name='ansible')
if msg['status'] != 'ok':
module.fail_json(msg=msg)
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()


@ -0,0 +1,464 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- Manages monitors within Datadog.
  - Options as described on U(https://docs.datadoghq.com/api/).
author: Sebastian Kornehl (@skornehl)
requirements: [datadog]
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
api_key:
description:
- Your Datadog API key.
required: true
type: str
api_host:
description:
- The URL to the Datadog API. Default value is V(https://api.datadoghq.com).
- This value can also be set with the E(DATADOG_HOST) environment variable.
required: false
type: str
version_added: '0.2.0'
app_key:
description:
- Your Datadog app key.
required: true
type: str
state:
description:
- The designated state of the monitor.
required: true
choices: ['present', 'absent', 'mute', 'unmute']
type: str
tags:
description:
- A list of tags to associate with your monitor when creating or updating.
- This can help you categorize and filter monitors.
type: list
elements: str
type:
description:
- The type of the monitor.
- The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0.
- The type V(composite) was added in community.general 3.4.0.
- The type V(event-v2 alert) was added in community.general 4.8.0.
choices:
- metric alert
- service check
- event alert
- event-v2 alert
- process alert
- log alert
- query alert
- trace-analytics alert
- rum alert
- composite
type: str
query:
description:
- The monitor query to notify on.
- Syntax varies depending on what type of monitor you are creating.
type: str
name:
description:
- The name of the alert.
required: true
type: str
notification_message:
description:
- A message to include with notifications for this monitor.
- Email notifications can be sent to specific users by using the same '@username' notation as events.
      - Monitor message template variables can be accessed by using double square brackets, that is, '[[' and ']]'.
type: str
silenced:
type: dict
description:
- Dictionary of scopes to silence, with timestamps or None.
- Each scope will be muted until the given POSIX timestamp or forever if the value is None.
notify_no_data:
description:
- Whether this monitor will notify when data stops reporting.
type: bool
default: false
no_data_timeframe:
description:
- The number of minutes before a monitor will notify when data stops reporting.
- Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
- If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
type: str
timeout_h:
description:
- The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
type: str
renotify_interval:
description:
- The number of minutes after the last notification before a monitor will re-notify on the current status.
- It will only re-notify if it is not resolved.
type: str
escalation_message:
description:
      - A message to include with a re-notification. Supports the same '@username' notation as used elsewhere.
- Not applicable if O(renotify_interval=none).
type: str
notify_audit:
description:
- Whether tagged users will be notified on changes to this monitor.
type: bool
default: false
thresholds:
type: dict
description:
- A dictionary of thresholds by status.
- Only available for service checks and metric alerts.
- Because each of them can have multiple thresholds, we do not define them directly in the query.
- "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})."
locked:
description:
- Whether changes to this monitor should be restricted to the creator or admins.
type: bool
default: false
require_full_window:
description:
- Whether this monitor needs a full window of data before it gets evaluated.
      - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations will be skipped.
type: bool
new_host_delay:
description:
- A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
- This gives the host time to fully initialize.
type: str
evaluation_delay:
description:
- Time to delay evaluation (in seconds).
- Effective for sparse values.
type: str
id:
description:
- The ID of the alert.
- If set, will be used instead of the name to locate the alert.
type: str
include_tags:
description:
      - Whether notifications from this monitor automatically insert the triggering tags into the title.
type: bool
default: true
version_added: 1.3.0
priority:
description:
- Integer from 1 (high) to 5 (low) indicating alert severity.
type: int
version_added: 4.6.0
notification_preset_name:
description:
- Toggles the display of additional content sent in the monitor notification.
choices:
- show_all
- hide_query
- hide_handles
- hide_all
type: str
version_added: 7.1.0
renotify_occurrences:
description:
- The number of times re-notification messages should be sent on the current status at the provided re-notification interval.
type: int
version_added: 7.1.0
renotify_statuses:
description:
- The types of monitor statuses for which re-notification messages are sent.
choices:
- alert
- warn
- no data
type: list
elements: str
version_added: 7.1.0
'''
EXAMPLES = '''
- name: Create a metric monitor
community.general.datadog_monitor:
type: "metric alert"
name: "Test monitor"
state: "present"
renotify_interval: 30
renotify_occurrences: 1
renotify_statuses: ["warn"]
notification_preset_name: "show_all"
query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
- name: Deletes a monitor
community.general.datadog_monitor:
name: "Test monitor"
state: "absent"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
- name: Mutes a monitor
community.general.datadog_monitor:
name: "Test monitor"
state: "mute"
silenced: '{"*":None}'
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
- name: Unmutes a monitor
community.general.datadog_monitor:
name: "Test monitor"
state: "unmute"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
- name: Use datadoghq.eu platform instead of datadoghq.com
community.general.datadog_monitor:
name: "Test monitor"
state: "absent"
api_host: https://api.datadoghq.eu
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
import traceback
# Import Datadog
DATADOG_IMP_ERR = None
try:
from datadog import initialize, api
HAS_DATADOG = True
except Exception:
DATADOG_IMP_ERR = traceback.format_exc()
HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
api_host=dict(),
app_key=dict(required=True, no_log=True),
state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
type=dict(choices=['metric alert', 'service check', 'event alert', 'event-v2 alert', 'process alert',
'log alert', 'query alert', 'trace-analytics alert',
'rum alert', 'composite']),
name=dict(required=True),
query=dict(),
notification_message=dict(no_log=True),
silenced=dict(type='dict'),
notify_no_data=dict(default=False, type='bool'),
no_data_timeframe=dict(),
timeout_h=dict(),
renotify_interval=dict(),
escalation_message=dict(),
notify_audit=dict(default=False, type='bool'),
thresholds=dict(type='dict', default=None),
tags=dict(type='list', elements='str', default=None),
locked=dict(default=False, type='bool'),
require_full_window=dict(type='bool'),
new_host_delay=dict(),
evaluation_delay=dict(),
id=dict(),
include_tags=dict(required=False, default=True, type='bool'),
priority=dict(type='int'),
notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']),
renotify_occurrences=dict(type='int'),
renotify_statuses=dict(type='list', elements='str', choices=['alert', 'warn', 'no data']),
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
options = {
'api_key': module.params['api_key'],
'api_host': module.params['api_host'],
'app_key': module.params['app_key']
}
initialize(**options)
# Check if api_key and app_key is correct or not
# if not, then fail here.
response = api.Monitor.get_all()
if isinstance(response, dict):
msg = response.get('errors', None)
if msg:
module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
if module.params['state'] == 'present':
install_monitor(module)
elif module.params['state'] == 'absent':
delete_monitor(module)
elif module.params['state'] == 'mute':
mute_monitor(module)
elif module.params['state'] == 'unmute':
unmute_monitor(module)
def _fix_template_vars(message):
if message:
return message.replace('[[', '{{').replace(']]', '}}')
return message
def _get_monitor(module):
if module.params['id'] is not None:
monitor = api.Monitor.get(module.params['id'])
if 'errors' in monitor:
module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
return monitor
else:
monitors = api.Monitor.get_all()
for monitor in monitors:
if monitor['name'] == _fix_template_vars(module.params['name']):
return monitor
return {}
def _post_monitor(module, options):
try:
kwargs = dict(type=module.params['type'], query=module.params['query'],
name=_fix_template_vars(module.params['name']),
message=_fix_template_vars(module.params['notification_message']),
escalation_message=_fix_template_vars(module.params['escalation_message']),
priority=module.params['priority'],
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.create(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
else:
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def _equal_dicts(a, b, ignore_keys):
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_monitor(module, monitor, options):
try:
kwargs = dict(id=monitor['id'], query=module.params['query'],
name=_fix_template_vars(module.params['name']),
message=_fix_template_vars(module.params['notification_message']),
escalation_message=_fix_template_vars(module.params['escalation_message']),
priority=module.params['priority'],
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.update(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
module.exit_json(changed=False, msg=msg)
else:
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def install_monitor(module):
options = {
"silenced": module.params['silenced'],
"notify_no_data": module.boolean(module.params['notify_no_data']),
"no_data_timeframe": module.params['no_data_timeframe'],
"timeout_h": module.params['timeout_h'],
"renotify_interval": module.params['renotify_interval'],
"escalation_message": module.params['escalation_message'],
"notify_audit": module.boolean(module.params['notify_audit']),
"locked": module.boolean(module.params['locked']),
"require_full_window": module.params['require_full_window'],
"new_host_delay": module.params['new_host_delay'],
"evaluation_delay": module.params['evaluation_delay'],
"include_tags": module.params['include_tags'],
"notification_preset_name": module.params['notification_preset_name'],
"renotify_occurrences": module.params['renotify_occurrences'],
"renotify_statuses": module.params['renotify_statuses'],
}
if module.params['type'] == "service check":
options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None:
options["thresholds"] = module.params['thresholds']
monitor = _get_monitor(module)
if not monitor:
_post_monitor(module, options)
else:
_update_monitor(module, monitor, options)
def delete_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.exit_json(changed=False)
try:
msg = api.Monitor.delete(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def mute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
msg = api.Monitor.mute(id=monitor['id'])
else:
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def unmute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif not monitor['options']['silenced']:
module.exit_json(changed=False)
try:
msg = api.Monitor.unmute(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()


@ -0,0 +1,490 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Branko Majic <branko@majic.rs>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: dconf
author:
- "Branko Majic (@azaghal)"
short_description: Modify and read dconf database
description:
- This module allows modifications and reading of C(dconf) database. The module
is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man
page for more details.
- Since C(dconf) requires a running D-Bus session to change values, the module
will try to detect an existing session and reuse it, or run the tool via
C(dbus-run-session).
requirements:
- Optionally the C(gi.repository) Python library (usually included in the OS
on hosts which have C(dconf)); this will become a non-optional requirement
in a future major release of community.general.
notes:
- This module depends on C(psutil) Python library (version 4.0.0 and upwards),
C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
distribution you are using, you may need to install additional packages to
have these available.
- This module uses the C(gi.repository) Python library when available for
accurate comparison of values in C(dconf) to values specified in Ansible
code. C(gi.repository) is likely to be present on most systems which have
C(dconf) but may not be present everywhere. When it is missing, a simple
string comparison between values is used, and there may be false positives,
that is, Ansible may think that a value is being changed when it is not.
This fallback will be removed in a future version of this module, at which
point the module will stop working on hosts without C(gi.repository).
- Detection of existing, running D-Bus session, required to change settings
via C(dconf), is not 100% reliable due to implementation details of D-Bus
daemon itself. This might lead to running applications not picking-up
changes on the fly if options are changed via Ansible and
C(dbus-run-session).
- Keep in mind that the C(dconf) CLI tool, which this module wraps around,
utilises an unusual syntax for the values (GVariant). For example, if you
wanted to provide a string value, the correct syntax would be
O(value="'myvalue'") - with single quotes as part of the Ansible parameter
value.
- When using loops in combination with a value like
V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible
type conversions. Applying a filter V({{ item.value | string }})
to the parameter variable can avoid potential conversion problems.
- The easiest way to figure out exact syntax/value you need to provide for a
key is by making the configuration change in application affected by the
key, and then having a look at value set via commands C(dconf dump
/path/to/dir/) or C(dconf read /path/to/key).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
key:
type: str
required: true
description:
- A dconf key to modify or read from the dconf database.
value:
type: raw
required: false
description:
- Value to set for the specified dconf key. Value should be specified in
GVariant format. Due to complexity of this format, it is best to have a
look at existing values in the dconf database.
- Required for O(state=present).
- Although the type is specified as "raw", it should typically be
specified as a string. However, boolean values in particular are
handled properly even when specified as booleans rather than strings
(in fact, handling booleans properly is why the type of this parameter
is "raw").
state:
type: str
required: false
default: present
choices: [ 'read', 'present', 'absent' ]
description:
- The action to take upon the key/value.
'''
RETURN = r"""
value:
  description: Value associated with the requested key.
returned: success, state was "read"
type: str
sample: "'Default'"
"""
EXAMPLES = r"""
- name: Configure available keyboard layouts in Gnome
community.general.dconf:
key: "/org/gnome/desktop/input-sources/sources"
value: "[('xkb', 'us'), ('xkb', 'se')]"
state: present
- name: Read currently available keyboard layouts in Gnome
community.general.dconf:
key: "/org/gnome/desktop/input-sources/sources"
state: read
register: keyboard_layouts
- name: Reset the available keyboard layouts in Gnome
community.general.dconf:
key: "/org/gnome/desktop/input-sources/sources"
state: absent
- name: Configure available keyboard layouts in Cinnamon
community.general.dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
value: "['us', 'se']"
state: present
- name: Read currently available keyboard layouts in Cinnamon
community.general.dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
state: read
register: keyboard_layouts
- name: Reset the available keyboard layouts in Cinnamon
community.general.dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
state: absent
- name: Disable desktop effects in Cinnamon
community.general.dconf:
key: "/org/cinnamon/desktop-effects"
value: "false"
state: present
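
# Illustrative follow-up, not part of the original docs: consume the value
# registered by one of the read examples above.
- name: Show the currently configured keyboard layouts
  ansible.builtin.debug:
    msg: "Current layouts: {{ keyboard_layouts.value }}"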
"""
import os
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.respawn import (
has_respawned,
probe_interpreters_for_module,
respawn_module,
)
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils import deps
glib_module_name = 'gi.repository.GLib'
try:
from gi.repository.GLib import Variant, GError
except ImportError:
Variant = None
GError = AttributeError
with deps.declare("psutil"):
import psutil
class DBusWrapper(object):
"""
Helper class that can be used for running a command with a working D-Bus
session.
If possible, command will be run against an existing D-Bus session,
otherwise the session will be spawned via dbus-run-session.
Example usage:
dbus_wrapper = DBusWrapper(ansible_module)
dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
"""
def __init__(self, module):
"""
Initialises an instance of the class.
:param module: Ansible module instance used to signal failures and run commands.
:type module: AnsibleModule
"""
# Store passed-in arguments and set-up some defaults.
self.module = module
# Try to extract existing D-Bus session address.
self.dbus_session_bus_address = self._get_existing_dbus_session()
# If no existing D-Bus session was detected, check if dbus-run-session
# is available.
if self.dbus_session_bus_address is None:
self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True)
def _get_existing_dbus_session(self):
"""
Detects and returns an existing D-Bus session bus address.
:returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
"""
# We'll be checking the processes of current user only.
uid = os.getuid()
# Go through all the pids for this user, try to extract the D-Bus
# session bus address from environment, and ensure it is possible to
# connect to it.
self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
for pid in psutil.pids():
try:
process = psutil.Process(pid)
process_real_uid, dummy, dummy = process.uids()
if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True)
command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
rc, dummy, dummy = self.module.run_command(command)
if rc == 0:
self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
return dbus_session_bus_address_candidate
# This can happen with things like SSH sessions etc.
except psutil.AccessDenied:
pass
# Process has disappeared while inspecting it
except psutil.NoSuchProcess:
pass
self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
return None
def run_command(self, command):
"""
Runs the specified command within a functional D-Bus session. Command is
effectively passed-on to AnsibleModule.run_command() method, with
modification for using dbus-run-session if necessary.
:param command: Command to run, including parameters. Each element of the list should be a string.
        :type command: list
:returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
"""
if self.dbus_session_bus_address is None:
self.module.debug("Using dbus-run-session wrapper for running commands.")
command = [self.dbus_run_session_cmd] + command
rc, out, err = self.module.run_command(command)
if self.dbus_session_bus_address is None and rc == 127:
self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
else:
extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
rc, out, err = self.module.run_command(command, environ_update=extra_environment)
return rc, out, err
class DconfPreference(object):
def __init__(self, module, check_mode=False):
"""
Initialises instance of the class.
:param module: Ansible module instance used to signal failures and run commands.
:type module: AnsibleModule
:param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
:type check_mode: bool
"""
self.module = module
self.check_mode = check_mode
# Check if dconf binary exists
self.dconf_bin = self.module.get_bin_path('dconf', required=True)
@staticmethod
def variants_are_equal(canonical_value, user_value):
"""Compare two string GVariant representations for equality.
Assumes `canonical_value` is "canonical" in the sense that the type of
the variant is specified explicitly if it cannot be inferred; this is
true for textual representations of variants generated by the `dconf`
command. The type of `canonical_value` is used to parse `user_value`,
so the latter does not need to be explicitly typed.
Returns True if the two values are equal.
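
        For example, a canonical value of "uint32 5" and a user value of "5"
        compare as equal, because the user value is parsed using the type
        taken from the canonical value.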
"""
if canonical_value is None:
# It's unset in dconf database, so anything the user is trying to
# set is a change.
return False
try:
variant1 = Variant.parse(None, canonical_value)
variant2 = Variant.parse(variant1.get_type(), user_value)
return variant1 == variant2
except GError:
return canonical_value == user_value
def read(self, key):
"""
Retrieves current value associated with the dconf key.
If an error occurs, a call will be made to AnsibleModule.fail_json.
:returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
"""
command = [self.dconf_bin, "read", key]
rc, out, err = self.module.run_command(command)
if rc != 0:
self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err,
out=out,
err=err)
if out == '':
value = None
else:
value = out.rstrip('\n')
return value
def write(self, key, value):
"""
Writes the value for specified key.
If an error occurs, a call will be made to AnsibleModule.fail_json.
:param key: dconf key for which the value should be set. Should be a full path.
:type key: str
:param value: Value to set for the specified dconf key. Should be specified in GVariant format.
:type value: str
:returns: bool -- True if a change was made, False if no change was required.
"""
# If no change is needed (or won't be done due to check_mode), notify
# caller straight away.
if self.variants_are_equal(self.read(key), value):
return False
elif self.check_mode:
return True
        # Set up the command to run. Since D-Bus is needed for the write
        # operation, run the dconf command through the D-Bus wrapper.
command = [self.dconf_bin, "write", key, value]
# Run the command and fetch standard return code, stdout, and stderr.
dbus_wrapper = DBusWrapper(self.module)
rc, out, err = dbus_wrapper.run_command(command)
if rc != 0:
self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err),
out=out,
err=err)
# Value was changed.
return True
def reset(self, key):
"""
        Resets the value for the specified key (removes it from the user configuration).
If an error occurs, a call will be made to AnsibleModule.fail_json.
:param key: dconf key to reset. Should be a full path.
:type key: str
:returns: bool -- True if a change was made, False if no change was required.
"""
# Read the current value first.
current_value = self.read(key)
# No change was needed, key is not set at all, or just notify user if we
# are in check mode.
if current_value is None:
return False
elif self.check_mode:
return True
# Set up the command to run. Since D-Bus is needed for the reset
# operation, run the dconf command through the DBusWrapper.
command = [self.dconf_bin, "reset", key]
# Run the command and fetch standard return code, stdout, and stderr.
dbus_wrapper = DBusWrapper(self.module)
rc, out, err = dbus_wrapper.run_command(command)
if rc != 0:
self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err,
out=out,
err=err)
# Value was changed.
return True
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent', 'read']),
key=dict(required=True, type='str', no_log=False),
# Converted to str below after special handling of bool.
value=dict(required=False, default=None, type='raw'),
),
supports_check_mode=True,
required_if=[
('state', 'present', ['value']),
],
)
if Variant is None:
# This interpreter can't see the GLib module. To try to fix that, we'll
# look in common locations for system-owned interpreters that can see
# it; if we find one, we'll respawn under it. Otherwise we'll proceed
# with degraded functionality, without the ability to parse GVariants.
# Later (in a different PR) we'll actually deprecate this degraded
# mode and fail with an error if the library can't be found.
if has_respawned():
# This shouldn't be possible; short-circuit early if it happens.
module.fail_json(
msg="%s must be installed and visible from %s." %
(glib_module_name, sys.executable))
interpreters = ['/usr/bin/python3', '/usr/bin/python2',
'/usr/bin/python']
interpreter = probe_interpreters_for_module(
interpreters, glib_module_name)
if interpreter:
# Found the Python bindings; respawn this module under the
# interpreter where we found them.
respawn_module(interpreter)
# This is the end of the line for this process, it will exit here
# once the respawned module has completed.
# Try to be forgiving about the user specifying a boolean as the value, or
# more accurately about the fact that YAML and Ansible are quite insistent
# about converting strings that look like booleans into booleans. Convert
# the boolean into a string of the type dconf will understand. Any type for
# the value other than boolean is just converted into a string directly.
if module.params['value'] is not None:
if isinstance(module.params['value'], bool):
module.params['value'] = 'true' if module.params['value'] else 'false'
else:
module.params['value'] = to_native(
module.params['value'], errors='surrogate_or_strict')
if Variant is None:
module.warn(
'WARNING: The gi.repository Python library is not available; '
'using string comparison to check value equality. This fallback '
'will be deprecated in a future version of community.general.')
deps.validate(module)
# Create wrapper instance.
dconf = DconfPreference(module, module.check_mode)
# Process based on different states.
if module.params['state'] == 'read':
value = dconf.read(module.params['key'])
module.exit_json(changed=False, value=value)
elif module.params['state'] == 'present':
changed = dconf.write(module.params['key'], module.params['value'])
module.exit_json(changed=changed)
elif module.params['state'] == 'absent':
changed = dconf.reset(module.params['key'])
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,536 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
# Copyright (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: deploy_helper
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects
description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
# TODO: convert below to RETURN documentation!
- "Running it with the O(state=query) or O(state=present) will return the C(deploy_helper) fact.
C(project_path), whatever you set in the O(path) parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
C(unfinished_filename), the file to check for to recognize unfinished builds,
C(previous_release), the release the 'current' symlink is pointing to,
C(previous_release_path), the full path to the 'current' symlink target,
C(new_release), either the 'release' parameter or a generated timestamp,
C(new_release_path), the path to the new release folder (not created by the module)."
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
path:
type: path
required: true
aliases: ['dest']
description:
- The root path of the project.
Returned in the C(deploy_helper.project_path) fact.
state:
type: str
description:
- The state of the project.
- V(query) will only gather facts.
- V(present) will create the project C(root) folder, and in it the C(releases) and C(shared) folders.
- V(finalize) will remove the unfinished_filename file, create a symlink to the newly
deployed release and optionally clean old releases.
- V(clean) will remove failed & old releases.
- V(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
choices: [ present, finalize, absent, clean, query ]
default: present
release:
type: str
description:
- The release version that is being deployed. Defaults to a timestamp format C(%Y%m%d%H%M%S) (for example V(20141119223359)).
This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize).
You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
type: str
description:
- The name of the folder that will hold the releases. This can be relative to O(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
default: releases
shared_path:
type: path
description:
- The name of the folder that will hold the shared resources. This can be relative to O(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
type: path
description:
- The name of the symlink that is created when the deploy is finalized. Used in O(state=finalize) and O(state=clean).
Returned in the C(deploy_helper.current_path) fact.
default: current
unfinished_filename:
type: str
description:
- The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that
contain this file will be deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is
automatically deleted from the C(new_release_path) during O(state=finalize).
default: DEPLOY_UNFINISHED
clean:
description:
- Whether to run the clean procedure in case of O(state=finalize).
type: bool
default: true
keep_releases:
type: int
description:
- The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
default: 5
notes:
- Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- Because of the default behaviour of generating the C(new_release) fact, this module will not be idempotent
unless you pass your own release name with O(release). Due to the nature of deploying software, this should not
be much of a problem.
extends_documentation_fragment:
- ansible.builtin.files
- community.general.attributes
'''
EXAMPLES = '''
# General explanation, starting with an example folder structure for a project:
# root:
# releases:
# - 20140415234508
# - 20140415235146
# - 20140416082818
#
# shared:
# - sessions
# - uploads
#
# current: releases/20140416082818
# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.
# Typical usage
- name: Initialize the deploy root and gather facts
community.general.deploy_helper:
path: /path/to/root
- name: Clone the project to the new release folder
ansible.builtin.git:
repo: git://foosball.example.org/path/to/repo.git
dest: '{{ deploy_helper.new_release_path }}'
version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
ansible.builtin.file:
path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
state: touch
- name: Perform some build steps, like running your dependency manager for example
composer:
command: install
working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
ansible.builtin.file:
path: '{{ deploy_helper.shared_path }}/{{ item }}'
state: directory
with_items:
- sessions
- uploads
- name: Add symlinks from the new release to the shared folder
ansible.builtin.file:
path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- path: app/sessions
src: sessions
- path: web/uploads
src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
community.general.deploy_helper:
path: /path/to/root
state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: present
# all paths can be absolute or relative (to the 'path' parameter)
- community.general.deploy_helper:
path: /path/to/root
releases_path: /var/www/project/releases
shared_path: /var/www/shared
current_path: /var/www/active
# Using your own naming strategy for releases (a version tag in this case):
- community.general.deploy_helper:
path: /path/to/root
release: v1.1.1
state: present
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Using a different unfinished_filename:
- community.general.deploy_helper:
path: /path/to/root
unfinished_filename: README.md
release: '{{ deploy_helper.new_release }}'
state: finalize
# Postponing the cleanup of older builds:
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
clean: false
- community.general.deploy_helper:
path: /path/to/root
state: clean
# Or running the cleanup ahead of the new deploy
- community.general.deploy_helper:
path: /path/to/root
state: clean
- community.general.deploy_helper:
path: /path/to/root
state: present
# Keeping more old releases:
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- community.general.deploy_helper:
path: /path/to/root
state: clean
keep_releases: 10
# Removing the entire project root folder
- community.general.deploy_helper:
path: /path/to/root
state: absent
# Debugging the facts returned by the module
- community.general.deploy_helper:
path: /path/to/root
- ansible.builtin.debug:
var: deploy_helper
'''
import os
import shutil
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
class DeployHelper(object):
def __init__(self, module):
self.module = module
self.file_args = module.load_file_common_arguments(module.params)
self.clean = module.params['clean']
self.current_path = module.params['current_path']
self.keep_releases = module.params['keep_releases']
self.path = module.params['path']
self.release = module.params['release']
self.releases_path = module.params['releases_path']
self.shared_path = module.params['shared_path']
self.state = module.params['state']
self.unfinished_filename = module.params['unfinished_filename']
def gather_facts(self):
current_path = os.path.join(self.path, self.current_path)
releases_path = os.path.join(self.path, self.releases_path)
if self.shared_path:
shared_path = os.path.join(self.path, self.shared_path)
else:
shared_path = None
previous_release, previous_release_path = self._get_last_release(current_path)
if not self.release and (self.state == 'query' or self.state == 'present'):
self.release = time.strftime("%Y%m%d%H%M%S")
if self.release:
new_release_path = os.path.join(releases_path, self.release)
else:
new_release_path = None
return {
'project_path': self.path,
'current_path': current_path,
'releases_path': releases_path,
'shared_path': shared_path,
'previous_release': previous_release,
'previous_release_path': previous_release_path,
'new_release': self.release,
'new_release_path': new_release_path,
'unfinished_filename': self.unfinished_filename
}
def delete_path(self, path):
if not os.path.lexists(path):
return False
if not os.path.isdir(path):
self.module.fail_json(msg="%s exists but is not a directory" % path)
if not self.module.check_mode:
try:
shutil.rmtree(path, ignore_errors=False)
except Exception as e:
self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
return True
def create_path(self, path):
changed = False
if not os.path.lexists(path):
changed = True
if not self.module.check_mode:
os.makedirs(path)
elif not os.path.isdir(path):
self.module.fail_json(msg="%s exists but is not a directory" % path)
changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
return changed
def check_link(self, path):
if os.path.lexists(path):
if not os.path.islink(path):
self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
def create_link(self, source, link_name):
if os.path.islink(link_name):
norm_link = os.path.normpath(os.path.realpath(link_name))
norm_source = os.path.normpath(os.path.realpath(source))
if norm_link == norm_source:
changed = False
else:
changed = True
if not self.module.check_mode:
if not os.path.lexists(source):
self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
tmp_link_name = link_name + '.' + self.unfinished_filename
if os.path.islink(tmp_link_name):
os.unlink(tmp_link_name)
os.symlink(source, tmp_link_name)
os.rename(tmp_link_name, link_name)
else:
changed = True
if not self.module.check_mode:
os.symlink(source, link_name)
return changed
def remove_unfinished_file(self, new_release_path):
changed = False
unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
if os.path.lexists(unfinished_file_path):
changed = True
if not self.module.check_mode:
os.remove(unfinished_file_path)
return changed
def remove_unfinished_builds(self, releases_path):
changes = 0
for release in os.listdir(releases_path):
if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
if self.module.check_mode:
changes += 1
else:
changes += self.delete_path(os.path.join(releases_path, release))
return changes
def remove_unfinished_link(self, path):
changed = False
if not self.release:
return changed
tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
if not self.module.check_mode and os.path.exists(tmp_link_name):
changed = True
os.remove(tmp_link_name)
return changed
def cleanup(self, releases_path, reserve_version):
changes = 0
if os.path.lexists(releases_path):
releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
try:
releases.remove(reserve_version)
except ValueError:
pass
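# Outside check mode, sort the remaining releases newest-first by
# directory creation time and delete everything beyond keep_releases;
# in check mode, just count the candidates.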
if not self.module.check_mode:
releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
for release in releases[self.keep_releases:]:
changes += self.delete_path(os.path.join(releases_path, release))
elif len(releases) > self.keep_releases:
changes += (len(releases) - self.keep_releases)
return changes
def _get_file_args(self, path):
file_args = self.file_args.copy()
file_args['path'] = path
return file_args
def _get_last_release(self, current_path):
previous_release = None
previous_release_path = None
if os.path.lexists(current_path):
previous_release_path = os.path.realpath(current_path)
previous_release = os.path.basename(previous_release_path)
return previous_release, previous_release_path
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(aliases=['dest'], required=True, type='path'),
release=dict(type='str'),
releases_path=dict(type='str', default='releases'),
shared_path=dict(type='path', default='shared'),
current_path=dict(type='path', default='current'),
keep_releases=dict(type='int', default=5),
clean=dict(type='bool', default=True),
unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'),
state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
),
required_if=[
('state', 'finalize', ['release']),
],
add_file_common_args=True,
supports_check_mode=True
)
deploy_helper = DeployHelper(module)
facts = deploy_helper.gather_facts()
result = {
'state': deploy_helper.state
}
changes = 0
if deploy_helper.state == 'query':
result['ansible_facts'] = {'deploy_helper': facts}
elif deploy_helper.state == 'present':
deploy_helper.check_link(facts['current_path'])
changes += deploy_helper.create_path(facts['project_path'])
changes += deploy_helper.create_path(facts['releases_path'])
if deploy_helper.shared_path:
changes += deploy_helper.create_path(facts['shared_path'])
result['ansible_facts'] = {'deploy_helper': facts}
elif deploy_helper.state == 'finalize':
if deploy_helper.keep_releases <= 0:
module.fail_json(msg="'keep_releases' should be at least 1")
changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
if deploy_helper.clean:
changes += deploy_helper.remove_unfinished_link(facts['project_path'])
changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
elif deploy_helper.state == 'clean':
changes += deploy_helper.remove_unfinished_link(facts['project_path'])
changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
elif deploy_helper.state == 'absent':
# destroy the facts
result['ansible_facts'] = {'deploy_helper': []}
changes += deploy_helper.delete_path(facts['project_path'])
if changes > 0:
result['changed'] = True
else:
result['changed'] = False
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,303 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
# Authors:
# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
# - Adam Friedman <tintoy@tintoy.io>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dimensiondata_network
short_description: Create, update, and delete MCP 1.0 & 2.0 networks
extends_documentation_fragment:
- community.general.dimensiondata
- community.general.dimensiondata_wait
- community.general.attributes
description:
- Create, update, and delete MCP 1.0 & 2.0 networks.
author: 'Aimon Bustardo (@aimonb)'
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- The name of the network domain to create.
required: true
type: str
description:
description:
- Additional description of the network domain.
required: false
type: str
service_plan:
description:
- The service plan, either "ESSENTIALS" or "ADVANCED".
- MCP 2.0 Only.
choices: [ESSENTIALS, ADVANCED]
default: ESSENTIALS
type: str
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
'''
EXAMPLES = '''
- name: Create an MCP 1.0 network
community.general.dimensiondata_network:
region: na
location: NA5
name: mynet
- name: Create an MCP 2.0 network
community.general.dimensiondata_network:
region: na
mcp_user: my_user
mcp_password: my_password
location: NA9
name: mynet
service_plan: ADVANCED
- name: Delete a network
community.general.dimensiondata_network:
region: na
location: NA1
name: mynet
state: absent
'''
RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when O(state=present).
type: complex
contains:
id:
description: Network ID.
type: str
sample: "8c787000-a000-4050-a215-280893411a7d"
name:
description: Network name.
type: str
sample: "My network"
description:
description: Network description.
type: str
sample: "My network description"
location:
description: Datacenter location.
type: str
sample: NA3
status:
description: Network status. (MCP 2.0 only)
type: str
sample: NORMAL
private_net:
description: Private network subnet. (MCP 1.0 only)
type: str
sample: "10.2.3.0"
multicast:
description: Multicast enabled? (MCP 1.0 only)
type: bool
sample: false
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
from ansible.module_utils.common.text.converters import to_native
if HAS_LIBCLOUD:
from libcloud.compute.base import NodeLocation
from libcloud.common.dimensiondata import DimensionDataAPIException
class DimensionDataNetworkModule(DimensionDataModule):
"""
The dimensiondata_network module for Ansible.
"""
def __init__(self):
"""
Create a new Dimension Data network module.
"""
super(DimensionDataNetworkModule, self).__init__(
module=AnsibleModule(
argument_spec=DimensionDataModule.argument_spec_with_wait(
name=dict(type='str', required=True),
description=dict(type='str', required=False),
service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
state=dict(default='present', choices=['present', 'absent'])
),
required_together=DimensionDataModule.required_together()
)
)
self.name = self.module.params['name']
self.description = self.module.params['description']
self.service_plan = self.module.params['service_plan']
self.state = self.module.params['state']
def state_present(self):
network = self._get_network()
if network:
self.module.exit_json(
changed=False,
msg='Network already exists',
network=self._network_to_dict(network)
)
network = self._create_network()
self.module.exit_json(
changed=True,
msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
network=self._network_to_dict(network)
)
def state_absent(self):
network = self._get_network()
if not network:
self.module.exit_json(
changed=False,
msg='Network "%s" does not exist' % self.name,
network=self._network_to_dict(network)
)
self._delete_network(network)
def _get_network(self):
if self.mcp_version == '1.0':
networks = self.driver.list_networks(location=self.location)
else:
networks = self.driver.ex_list_network_domains(location=self.location)
matched_network = [network for network in networks if network.name == self.name]
if matched_network:
return matched_network[0]
return None
def _network_to_dict(self, network):
network_dict = dict(
id=network.id,
name=network.name,
description=network.description
)
if isinstance(network.location, NodeLocation):
network_dict['location'] = network.location.id
else:
network_dict['location'] = network.location
if self.mcp_version == '1.0':
network_dict['private_net'] = network.private_net
network_dict['multicast'] = network.multicast
network_dict['status'] = None
else:
network_dict['private_net'] = None
network_dict['multicast'] = None
network_dict['status'] = network.status
return network_dict
def _create_network(self):
# Make sure service_plan argument is defined
if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
self.module.fail_json(
msg='service_plan required when creating network and location is MCP 2.0'
)
# Create network
try:
if self.mcp_version == '1.0':
network = self.driver.ex_create_network(
self.location,
self.name,
description=self.description
)
else:
network = self.driver.ex_create_network_domain(
self.location,
self.name,
self.module.params['service_plan'],
description=self.description
)
except DimensionDataAPIException as e:
self.module.fail_json(
msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
)
if self.module.params['wait'] is True:
network = self._wait_for_network_state(network.id, 'NORMAL')
return network
def _delete_network(self, network):
try:
if self.mcp_version == '1.0':
deleted = self.driver.ex_delete_network(network)
else:
deleted = self.driver.ex_delete_network_domain(network)
if deleted:
self.module.exit_json(
changed=True,
msg="Deleted network with id %s" % network.id
)
self.module.fail_json(
"Unexpected failure deleting network with id %s" % network.id
)
except DimensionDataAPIException as e:
self.module.fail_json(
msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
)
def _wait_for_network_state(self, net_id, state_to_wait_for):
try:
return self.driver.connection.wait_for_state(
state_to_wait_for,
self.driver.ex_get_network_domain,
self.module.params['wait_poll_interval'],
self.module.params['wait_time'],
net_id
)
except DimensionDataAPIException as e:
self.module.fail_json(
msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
exception=traceback.format_exc()
)
def main():
module = DimensionDataNetworkModule()
if module.state == 'present':
module.state_present()
elif module.state == 'absent':
module.state_absent()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,564 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Authors:
# - Adam Friedman <tintoy@tintoy.io>
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: dimensiondata_vlan
short_description: Manage a VLAN in a Cloud Control network domain
extends_documentation_fragment:
- community.general.dimensiondata
- community.general.dimensiondata_wait
- community.general.attributes
description:
- Manage VLANs in Cloud Control network domains.
author: 'Adam Friedman (@tintoy)'
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- The name of the target VLAN.
type: str
required: true
description:
description:
- A description of the VLAN.
type: str
default: ''
network_domain:
description:
- The ID or name of the target network domain.
required: true
type: str
private_ipv4_base_address:
description:
- The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
type: str
default: ''
private_ipv4_prefix_size:
description:
- The size of the IPv4 address space, for example V(24).
- Required, if O(private_ipv4_base_address) is specified.
type: int
default: 0
state:
description:
- The desired state for the target VLAN.
- V(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
choices: [present, absent, readonly]
default: present
type: str
allow_expand:
description:
- Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
- If V(false), the module will fail under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
type: bool
default: false
'''
EXAMPLES = '''
- name: Add or update VLAN
community.general.dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
description: A test VLAN
private_ipv4_base_address: 192.168.23.0
private_ipv4_prefix_size: 24
state: present
wait: true
- name: Read / get VLAN details
community.general.dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
state: readonly
wait: true
- name: Delete a VLAN
community.general.dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan_1
state: absent
wait: true
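# A minimal sketch (not part of the original examples): expanding an existing
# VLAN's private IPv4 network. Expansion reduces the prefix size and is not
# reversible, so the documented allow_expand option must be set explicitly.
- name: Expand the VLAN's private IPv4 network
  community.general.dimensiondata_vlan:
    region: na
    location: NA5
    network_domain: test_network
    name: my_vlan1
    private_ipv4_base_address: 192.168.23.0
    private_ipv4_prefix_size: 23
    allow_expand: true
    state: present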
'''
RETURN = '''
vlan:
description: Dictionary describing the VLAN.
returned: On success when O(state=present)
type: complex
contains:
id:
description: VLAN ID.
type: str
sample: "aaaaa000-a000-4050-a215-2808934ccccc"
name:
description: VLAN name.
type: str
sample: "My VLAN"
description:
description: VLAN description.
type: str
sample: "My VLAN description"
location:
description: Datacenter location.
type: str
sample: NA3
private_ipv4_base_address:
description: The base address for the VLAN's private IPV4 network.
type: str
sample: 192.168.23.0
private_ipv4_prefix_size:
description: The prefix size for the VLAN's private IPV4 network.
type: int
sample: 24
private_ipv4_gateway_address:
description: The gateway address for the VLAN's private IPV4 network.
type: str
sample: 192.168.23.1
private_ipv6_base_address:
description: The base address for the VLAN's IPV6 network.
type: str
sample: 2402:9900:111:1195:0:0:0:0
private_ipv6_prefix_size:
description: The prefix size for the VLAN's IPV6 network.
type: int
sample: 64
private_ipv6_gateway_address:
description: The gateway address for the VLAN's IPV6 network.
type: str
sample: 2402:9900:111:1195:0:0:0:1
status:
description: VLAN status.
type: str
sample: NORMAL
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
try:
from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
HAS_LIBCLOUD = True
except ImportError:
DimensionDataVlan = None
HAS_LIBCLOUD = False
class DimensionDataVlanModule(DimensionDataModule):
"""
The dimensiondata_vlan module for Ansible.
"""
def __init__(self):
"""
Create a new Dimension Data VLAN module.
"""
super(DimensionDataVlanModule, self).__init__(
module=AnsibleModule(
argument_spec=DimensionDataModule.argument_spec_with_wait(
name=dict(required=True, type='str'),
description=dict(default='', type='str'),
network_domain=dict(required=True, type='str'),
private_ipv4_base_address=dict(default='', type='str'),
private_ipv4_prefix_size=dict(default=0, type='int'),
allow_expand=dict(required=False, default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent', 'readonly'])
),
required_together=DimensionDataModule.required_together()
)
)
self.name = self.module.params['name']
self.description = self.module.params['description']
self.network_domain_selector = self.module.params['network_domain']
self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
self.state = self.module.params['state']
self.allow_expand = self.module.params['allow_expand']
if self.wait and self.state != 'present':
self.module.fail_json(
msg='The wait parameter is only supported when state is "present".'
)
def state_present(self):
"""
Ensure that the target VLAN is present.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if not vlan:
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
self.name, self.network_domain_selector
),
changed=True
)
vlan = self._create_vlan(network_domain)
self.module.exit_json(
msg='Created VLAN "{0}" in network domain "{1}".'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
else:
diff = VlanDiff(vlan, self.module.params)
if not diff.has_changes():
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=False
)
return
try:
diff.ensure_legal_change()
except InvalidVlanChangeError as invalid_vlan_change:
self.module.fail_json(
msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
self.name, self.network_domain_selector, invalid_vlan_change
)
)
if diff.needs_expand() and not self.allow_expand:
self.module.fail_json(
msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
self.private_ipv4_prefix_size
) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
vlan.private_ipv4_range_size
) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
)
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
if diff.needs_edit():
vlan.name = self.name
vlan.description = self.description
self.driver.ex_update_vlan(vlan)
if diff.needs_expand():
vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
self.driver.ex_expand_vlan(vlan)
self.module.exit_json(
msg='Updated VLAN "{0}" in network domain "{1}".'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
def state_readonly(self):
"""
Read the target VLAN's state.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if vlan:
self.module.exit_json(
vlan=vlan_to_dict(vlan),
changed=False
)
else:
self.module.fail_json(
msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
self.name, self.network_domain_selector
)
)
def state_absent(self):
"""
Ensure that the target VLAN is not present.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if not vlan:
self.module.exit_json(
msg='VLAN "{0}" is absent from network domain "{1}".'.format(
self.name, self.network_domain_selector
),
changed=False
)
return
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
self._delete_vlan(vlan)
self.module.exit_json(
msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
self.name, self.network_domain_selector
),
changed=True
)
def _get_vlan(self, network_domain):
"""
Retrieve the target VLAN details from CloudControl.
:param network_domain: The target network domain.
:return: The VLAN, or None if the target VLAN was not found.
:rtype: DimensionDataVlan
"""
vlans = self.driver.ex_list_vlans(
location=self.location,
network_domain=network_domain
)
matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
if matching_vlans:
return matching_vlans[0]
return None
def _create_vlan(self, network_domain):
vlan = self.driver.ex_create_vlan(
network_domain,
self.name,
self.private_ipv4_base_address,
self.description,
self.private_ipv4_prefix_size
)
if self.wait:
vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
return vlan
def _delete_vlan(self, vlan):
try:
self.driver.ex_delete_vlan(vlan)
# Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
if self.wait:
self._wait_for_vlan_state(vlan, 'NOT_FOUND')
except DimensionDataAPIException as api_exception:
self.module.fail_json(
msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
vlan.id, api_exception.msg
)
)
def _wait_for_vlan_state(self, vlan, state_to_wait_for):
network_domain = self._get_network_domain()
wait_poll_interval = self.module.params['wait_poll_interval']
wait_time = self.module.params['wait_time']
# Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
try:
return self.driver.connection.wait_for_state(
state_to_wait_for,
self.driver.ex_get_vlan,
wait_poll_interval,
wait_time,
vlan
)
except DimensionDataAPIException as api_exception:
if api_exception.code != 'RESOURCE_NOT_FOUND':
raise
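# The VLAN is gone, which is the state we were waiting for; return a
# synthetic NOT_FOUND placeholder instead of propagating the error.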
return DimensionDataVlan(
id=vlan.id,
status='NOT_FOUND',
name='',
description='',
private_ipv4_range_address='',
private_ipv4_range_size=0,
ipv4_gateway='',
ipv6_range_address='',
ipv6_range_size=0,
ipv6_gateway='',
location=self.location,
network_domain=network_domain
)
def _get_network_domain(self):
"""
Retrieve the target network domain from the Cloud Control API.
:return: The network domain.
"""
try:
return self.get_network_domain(
self.network_domain_selector, self.location
)
except UnknownNetworkError:
self.module.fail_json(
msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
self.network_domain_selector, self.location
)
)
return None
class InvalidVlanChangeError(Exception):
"""
Error raised when an illegal change to VLAN state is attempted.
"""
pass
class VlanDiff(object):
"""
Represents differences between VLAN information (from CloudControl) and module parameters.
"""
def __init__(self, vlan, module_params):
"""
:param vlan: The VLAN information from CloudControl.
:type vlan: DimensionDataVlan
:param module_params: The module parameters.
:type module_params: dict
"""
self.vlan = vlan
self.module_params = module_params
self.name_changed = module_params['name'] != vlan.name
self.description_changed = module_params['description'] != vlan.description
self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
# Is configured prefix size greater than or less than the actual prefix size?
private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
def has_changes(self):
"""
Does the VlanDiff represent any changes between the VLAN and module configuration?
:return: True, if there are changes; otherwise, False.
"""
return self.needs_edit() or self.needs_expand()
def ensure_legal_change(self):
"""
Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
- private_ipv4_base_address cannot be changed
- private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
:raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
"""
# Cannot change base address for private IPv4 network.
if self.private_ipv4_base_address_changed:
raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
# Cannot shrink private IPv4 network (by increasing prefix size).
if self.private_ipv4_prefix_size_increased:
raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
def needs_edit(self):
"""
Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
:return: True, if an Edit operation is required; otherwise, False.
"""
return self.name_changed or self.description_changed
def needs_expand(self):
"""
Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
The VLAN's network is expanded by reducing the size of its network prefix.
:return: True, if an Expand operation is required; otherwise, False.
"""
return self.private_ipv4_prefix_size_decreased
def vlan_to_dict(vlan):
return {
'id': vlan.id,
'name': vlan.name,
'description': vlan.description,
'location': vlan.location.id,
'private_ipv4_base_address': vlan.private_ipv4_range_address,
'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
'private_ipv4_gateway_address': vlan.ipv4_gateway,
'ipv6_base_address': vlan.ipv6_range_address,
'ipv6_prefix_size': vlan.ipv6_range_size,
'ipv6_gateway_address': vlan.ipv6_gateway,
'status': vlan.status
}
def main():
module = DimensionDataVlanModule()
if module.state == 'present':
module.state_present()
elif module.state == 'readonly':
module.state_readonly()
elif module.state == 'absent':
module.state_absent()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,223 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Christian Wollinger <cwollinger@web.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: discord
short_description: Send Discord messages
version_added: 3.1.0
description:
- Sends a message to a Discord channel using the Discord webhook API.
author: Christian Wollinger (@cwollinger)
seealso:
- name: API documentation
description: Documentation for Discord API
link: https://discord.com/developers/docs/resources/webhook#execute-webhook
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
webhook_id:
description:
- The webhook ID.
- "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
required: true
type: str
webhook_token:
description:
- The webhook token.
- "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
required: true
type: str
content:
description:
- Content of the message to the Discord channel.
- At least one of O(content) and O(embeds) must be specified.
type: str
username:
description:
- Overrides the default username of the webhook.
type: str
avatar_url:
description:
- Overrides the default avatar of the webhook.
type: str
tts:
description:
- Set this to V(true) if this is a TTS (Text to Speech) message.
type: bool
default: false
embeds:
description:
- Send messages as Embeds to the Discord channel.
- Embeds can have a colored border, embedded images, text fields and more.
- "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)"
- At least one of O(content) and O(embeds) must be specified.
type: list
elements: dict
'''
EXAMPLES = """
- name: Send a message to the Discord channel
community.general.discord:
webhook_id: "00000"
webhook_token: "XXXYYY"
content: "This is a message from ansible"
- name: Send a message to the Discord channel with specific username and avatar
community.general.discord:
webhook_id: "00000"
webhook_token: "XXXYYY"
content: "This is a message from ansible"
username: Ansible
avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
- name: Send an embedded message to the Discord channel
community.general.discord:
webhook_id: "00000"
webhook_token: "XXXYYY"
embeds:
- title: "Embedded message"
description: "This is an embedded message"
footer:
text: "Author: Ansible"
image:
url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
- name: Send two embedded messages
community.general.discord:
webhook_id: "00000"
webhook_token: "XXXYYY"
embeds:
- title: "First message"
description: "This is my first embedded message"
footer:
text: "Author: Ansible"
image:
url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
- title: "Second message"
description: "This is my first second message"
footer:
text: "Author: Ansible"
icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
fields:
- name: "Field 1"
value: "Value of my first field"
- name: "Field 2"
value: "Value of my second field"
timestamp: "{{ ansible_date_time.iso8601 }}"
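# A minimal sketch (not part of the original examples): sending a
# text-to-speech message using the documented tts option.
- name: Send a TTS message to the Discord channel
  community.general.discord:
    webhook_id: "00000"
    webhook_token: "XXXYYY"
    content: "This is a TTS message from ansible"
    tts: true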
"""
RETURN = """
http_code:
description:
- Response Code returned by Discord API.
returned: always
type: int
sample: 204
"""
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.basic import AnsibleModule
def discord_check_mode(module):
webhook_id = module.params['webhook_id']
webhook_token = module.params['webhook_token']
headers = {
'content-type': 'application/json'
}
url = "https://discord.com/api/webhooks/%s/%s" % (
webhook_id, webhook_token)
response, info = fetch_url(module, url, method='GET', headers=headers)
return response, info
def discord_text_msg(module):
webhook_id = module.params['webhook_id']
webhook_token = module.params['webhook_token']
content = module.params['content']
user = module.params['username']
avatar_url = module.params['avatar_url']
tts = module.params['tts']
embeds = module.params['embeds']
headers = {
'content-type': 'application/json'
}
url = "https://discord.com/api/webhooks/%s/%s" % (
webhook_id, webhook_token)
payload = {
'content': content,
'username': user,
'avatar_url': avatar_url,
'tts': tts,
'embeds': embeds,
}
payload = module.jsonify(payload)
response, info = fetch_url(module, url, data=payload, headers=headers, method='POST')
return response, info
def main():
module = AnsibleModule(
argument_spec=dict(
webhook_id=dict(type='str', required=True),
webhook_token=dict(type='str', required=True, no_log=True),
content=dict(type='str'),
username=dict(type='str'),
avatar_url=dict(type='str'),
tts=dict(type='bool', default=False),
embeds=dict(type='list', elements='dict'),
),
required_one_of=[['content', 'embeds']],
supports_check_mode=True
)
result = dict(
changed=False,
http_code='',
)
if module.check_mode:
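# In check mode, validate the webhook by fetching its metadata with a
# GET request instead of posting the message.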
response, info = discord_check_mode(module)
if info['status'] != 200:
try:
module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
except Exception:
module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
else:
module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read()))
else:
response, info = discord_text_msg(module)
if info['status'] != 204:
try:
module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
except Exception:
module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
else:
module.exit_json(msg=info['msg'], changed=True, http_code=info['status'])
if __name__ == "__main__":
main()

View File

@ -0,0 +1,113 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: django_check
author:
- Alexei Znamensky (@russoz)
short_description: Wrapper for C(django-admin check)
version_added: 9.1.0
description:
- This module is a wrapper for the execution of C(django-admin check).
extends_documentation_fragment:
- community.general.attributes
- community.general.django
options:
database:
description:
- Specify databases to run checks against.
- If not specified, Django will not run database tests.
type: list
elements: str
deploy:
description:
- Include additional checks relevant in a deployment setting.
type: bool
default: false
fail_level:
description:
- Message level that will trigger failure.
- Default is the Django default value. Check the documentation for the version being used.
type: str
choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG]
tags:
description:
- Restrict checks to specific tags.
type: list
elements: str
apps:
description:
- Restrict checks to specific applications.
- Default is to check all applications.
type: list
elements: str
notes:
- The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc).
- The module will fail if RV(ignore:rc) is not zero.
attributes:
check_mode:
support: full
diff_mode:
support: none
"""
EXAMPLES = """
- name: Check the entire project
community.general.django_check:
settings: myproject.settings
- name: Create the project using specific databases
community.general.django_check:
database:
- somedb
- myotherdb
settings: fancysite.settings
pythonpath: /home/joedoe/project/fancysite
venv: /home/joedoe/project/fancysite/venv
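# A minimal sketch (not part of the original examples): combining the
# documented deploy and fail_level options to fail on deployment warnings.
- name: Run deployment checks and fail on warnings
  community.general.django_check:
    deploy: true
    fail_level: WARNING
    settings: myproject.settings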
"""
RETURN = """
run_info:
description: Command-line execution information.
type: dict
returned: success and O(verbosity) >= 3
"""
from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
class DjangoCheck(DjangoModuleHelper):
module = dict(
argument_spec=dict(
database=dict(type="list", elements="str"),
deploy=dict(type="bool", default=False),
fail_level=dict(type="str", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]),
tags=dict(type="list", elements="str"),
apps=dict(type="list", elements="str"),
),
supports_check_mode=True,
)
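# Map each option to its django-admin CLI form; stack() repeats the
# wrapped flag once per list element (for example, --database a --database b).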
arg_formats = dict(
database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"),
deploy=cmd_runner_fmt.as_bool("--deploy"),
fail_level=cmd_runner_fmt.as_opt_val("--fail-level"),
tags=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--tag"),
apps=cmd_runner_fmt.as_list(),
)
django_admin_cmd = "check"
django_admin_arg_order = "database deploy fail_level tags apps"
def main():
DjangoCheck.execute()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,83 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: django_command
author:
- Alexei Znamensky (@russoz)
short_description: Run Django admin commands
version_added: 9.0.0
description:
- This module allows the execution of arbitrary Django admin commands.
extends_documentation_fragment:
- community.general.attributes
- community.general.django
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
command:
description:
- Django admin command. It must be a valid command accepted by C(python -m django) at the target system.
type: str
required: true
extra_args:
type: list
elements: str
description:
- List of extra arguments passed to the django admin command.
"""
EXAMPLES = """
- name: Check the project
community.general.django_command:
command: check
settings: myproject.settings
- name: Check the project in specified python path, using virtual environment
community.general.django_command:
command: check
settings: fancysite.settings
pythonpath: /home/joedoe/project/fancysite
venv: /home/joedoe/project/fancysite/venv
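# A minimal sketch (not part of the original examples): passing positional
# arguments through to the admin command with the documented extra_args option.
- name: Check only selected applications
  community.general.django_command:
    command: check
    extra_args:
      - auth
      - contenttypes
    settings: myproject.settings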
"""
RETURN = """
run_info:
description: Command-line execution information.
type: dict
returned: success and O(verbosity) >= 3
"""
from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
class DjangoCommand(DjangoModuleHelper):
module = dict(
argument_spec=dict(
command=dict(type="str", required=True),
extra_args=dict(type="list", elements="str"),
),
supports_check_mode=False,
)
arg_formats = dict(
extra_args=cmd_runner_fmt.as_list(),
)
django_admin_arg_order = "extra_args"
def main():
DjangoCommand.execute()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,67 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: django_createcachetable
author:
- Alexei Znamensky (@russoz)
short_description: Wrapper for C(django-admin createcachetable)
version_added: 9.1.0
description:
- This module is a wrapper for the execution of C(django-admin createcachetable).
extends_documentation_fragment:
- community.general.attributes
- community.general.django
- community.general.django.database
attributes:
check_mode:
support: full
diff_mode:
support: none
"""
EXAMPLES = """
- name: Create cache table in the default database
community.general.django_createcachetable:
settings: myproject.settings
- name: Create cache table in the other database
community.general.django_createcachetable:
database: myotherdb
settings: fancysite.settings
pythonpath: /home/joedoe/project/fancysite
venv: /home/joedoe/project/fancysite/venv
"""
RETURN = """
run_info:
description: Command-line execution information.
type: dict
returned: success and O(verbosity) >= 3
"""
from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
class DjangoCreateCacheTable(DjangoModuleHelper):
module = dict(
supports_check_mode=True,
)
django_admin_cmd = "createcachetable"
django_admin_arg_order = "noinput database dry_run"
_django_args = ["noinput", "database", "dry_run"]
_check_mode_arg = "dry_run"
def main():
DjangoCreateCacheTable.execute()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,366 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
# Copyright (c) 2013, Scott Anderson <scottanderson42@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application
description:
- Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
O(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
command:
description:
- The name of the Django management command to run. The commands listed below are built into this module and have some basic parameter validation.
- V(collectstatic) - Collects the static files into C(STATIC_ROOT).
- V(createcachetable) - Creates the cache tables for use with the database cache backend.
- V(flush) - Removes all data from the database.
- V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database.
- V(migrate) - Synchronizes the database state with models and migrations.
- V(test) - Runs tests for all installed apps.
        - Other commands can be entered, but will fail if they are unknown to Django. Commands that may
          prompt for user input should be run with the C(--noinput) flag.
- Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0.
See note about supported versions of Django.
type: str
required: true
project_path:
description:
- The path to the root of the Django application where C(manage.py) lives.
type: path
required: true
aliases: [app_path, chdir]
settings:
description:
- The Python path to the application's settings module, such as V(myapp.settings).
type: path
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located
external to the application directory.
- This would be equivalent to adding O(pythonpath)'s value to the E(PYTHONPATH) environment variable.
type: path
required: false
aliases: [python_path]
virtualenv:
description:
- An optional path to a C(virtualenv) installation to use while running the manage application.
- The virtual environment must exist, otherwise the module will fail.
type: path
aliases: [virtual_env]
apps:
description:
        - A space-delimited list of apps to target. Used by the V(test) command.
type: str
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the V(createcachetable) command.
type: str
required: false
clear:
description:
- Clear the existing files before trying to copy or link the original file.
- Used only with the V(collectstatic) command. The C(--noinput) argument will be added automatically.
required: false
default: false
type: bool
database:
description:
- The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb),
and V(migrate) commands.
type: str
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the V(test) command.
required: false
default: false
type: bool
aliases: [fail_fast]
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the V(loaddata) command.
type: str
required: false
skip:
description:
        - Will skip over out-of-order missing migrations. This parameter can only be used with the V(migrate) command.
required: false
type: bool
merge:
description:
        - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter
          can only be used with the V(migrate) command.
required: false
type: bool
link:
description:
        - Will create links to the files instead of copying them. This parameter can only be used with the
          V(collectstatic) command.
required: false
type: bool
testrunner:
description:
- Controls the test runner class that is used to execute tests.
- This parameter is passed as-is to C(manage.py).
type: str
required: false
aliases: [test_runner]
ack_venv_creation_deprecation:
description:
- This option no longer has any effect since community.general 9.0.0.
- It will be removed from community.general 11.0.0.
type: bool
version_added: 5.8.0
notes:
- >
B(ATTENTION): Support for Django releases older than 4.1 has been removed in
    community.general version 9.0.0. While the module allows free-form commands and
    does not verify the version of Django being used, it is B(strongly recommended)
to use a more recent version of Django.
  - Please note that Django 4.1 requires Python 3.8 or greater.
- This module will not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment
does not already exist at the given location. This behavior changed in community.general version 9.0.0.
- The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip).
- This module assumes English error messages for the V(createcachetable) command to detect table existence,
unfortunately.
- To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings.
- Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang,
for example C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
seealso:
- name: django-admin and manage.py Reference
description: Reference for C(django-admin) or C(manage.py) commands.
link: https://docs.djangoproject.com/en/4.1/ref/django-admin/
- name: Django Download page
description: The page showing how to get Django and the timeline of supported releases.
link: https://www.djangoproject.com/download/
- name: What Python version can I use with Django?
description: From the Django FAQ, the response to Python requirements for the framework.
link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django
requirements: [ "django >= 4.1" ]
author:
- Alexei Znamensky (@russoz)
- Scott Anderson (@tastychutney)
'''
EXAMPLES = """
- name: Run cleanup on the application installed in django_dir
community.general.django_manage:
command: clearsessions
project_path: "{{ django_dir }}"
- name: Load the initial_data fixture into the application
community.general.django_manage:
command: loaddata
project_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
- name: Run migrate on the application
community.general.django_manage:
command: migrate
project_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
- name: Run the SmokeTest test case from the main app. Useful for testing deploys
community.general.django_manage:
command: test
project_path: "{{ django_dir }}"
apps: main.SmokeTest
- name: Create an initial superuser
community.general.django_manage:
command: "createsuperuser --noinput --username=admin --email=admin@example.com"
project_path: "{{ django_dir }}"
"""
import os
import sys
import shlex
from ansible.module_utils.basic import AnsibleModule
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(venv_param, 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
module.fail_json(msg='%s does not point to a valid virtual environment' % venv_param)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_check_changed(output):
return "already exists" not in output
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def migrate_filter_output(line):
return ("Migrating forwards " in line) \
or ("Installed" in line and "Installed 0 object" not in line) \
or ("Applying" in line)
def collectstatic_filter_output(line):
return line and "0 static files" not in line
def main():
command_allowed_param_map = dict(
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
test=('failfast', 'testrunner', 'apps', ),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True, type='str'),
project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
settings=dict(type='path'),
pythonpath=dict(type='path', aliases=['python_path']),
virtualenv=dict(type='path', aliases=['virtual_env']),
apps=dict(),
cache_table=dict(type='str'),
clear=dict(default=False, type='bool'),
database=dict(type='str'),
failfast=dict(default=False, type='bool', aliases=['fail_fast']),
fixtures=dict(type='str'),
testrunner=dict(type='str', aliases=['test_runner']),
skip=dict(type='bool'),
merge=dict(type='bool'),
link=dict(type='bool'),
ack_venv_creation_deprecation=dict(type='bool', removed_in_version='11.0.0', removed_from_collection='community.general'),
),
)
command_split = shlex.split(module.params['command'])
command_bin = command_split[0]
project_path = module.params['project_path']
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
        if value and param not in command_allowed_param_map.get(command_bin, ()):
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin))
for param in command_required_param_map.get(command_bin, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command_bin))
_ensure_virtualenv(module)
run_cmd_args = ["./manage.py"] + command_split
if command_bin in noinput_commands and '--noinput' not in command_split:
run_cmd_args.append("--noinput")
for param in general_params:
if module.params[param]:
run_cmd_args.append('--%s=%s' % (param, module.params[param]))
for param in specific_boolean_params:
if module.params[param]:
run_cmd_args.append('--%s' % param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
if param in ('fixtures', 'apps'):
run_cmd_args.extend(shlex.split(module.params[param]))
else:
run_cmd_args.append(module.params[param])
rc, out, err = module.run_command(run_cmd_args, cwd=project_path)
if rc != 0:
if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'already exists.'
else:
if "Unknown command:" in err:
_fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin)
_fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command_bin + "_filter_output", None)
if filt:
filtered_output = list(filter(filt, lines))
if len(filtered_output):
changed = True
check_changed = globals().get("{0}_check_changed".format(command_bin), None)
if check_changed:
changed = check_changed(out)
module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path,
virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])
if __name__ == '__main__':
main()

View File

@ -0,0 +1,225 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2023, Andrew Hyatt <andy@hyatt.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: dnf_config_manager
short_description: Enable or disable dnf repositories using config-manager
version_added: 8.2.0
description:
- This module enables or disables repositories using the C(dnf config-manager) sub-command.
author: Andrew Hyatt (@ahyattdev) <andy@hyatt.xyz>
requirements:
- dnf
- dnf-plugins-core
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- Repository ID, for example V(crb).
default: []
required: false
type: list
elements: str
state:
description:
- Whether the repositories should be V(enabled) or V(disabled).
default: enabled
required: false
type: str
choices: [enabled, disabled]
seealso:
- module: ansible.builtin.dnf
- module: ansible.builtin.yum_repository
'''
EXAMPLES = r'''
- name: Ensure the crb repository is enabled
community.general.dnf_config_manager:
name: crb
state: enabled
- name: Ensure the appstream and zfs repositories are disabled
community.general.dnf_config_manager:
name:
- appstream
- zfs
state: disabled
'''
RETURN = r'''
repo_states_pre:
  description: Repository states before action taken.
returned: success
type: dict
contains:
enabled:
description: Enabled repository IDs.
returned: success
type: list
elements: str
disabled:
description: Disabled repository IDs.
returned: success
type: list
elements: str
sample:
enabled:
- appstream
- baseos
- crb
disabled:
- appstream-debuginfo
- appstream-source
- baseos-debuginfo
- baseos-source
- crb-debug
- crb-source
repo_states_post:
description: Repository states after action taken.
returned: success
type: dict
contains:
enabled:
description: Enabled repository IDs.
returned: success
type: list
elements: str
disabled:
description: Disabled repository IDs.
returned: success
type: list
elements: str
sample:
enabled:
- appstream
- baseos
- crb
disabled:
- appstream-debuginfo
- appstream-source
- baseos-debuginfo
- baseos-source
- crb-debug
- crb-source
changed_repos:
description: Repositories changed.
returned: success
type: list
elements: str
sample: [ 'crb' ]
'''
from ansible.module_utils.basic import AnsibleModule
import os
import re
DNF_BIN = "/usr/bin/dnf"
REPO_ID_RE = re.compile(r'^Repo-id\s*:\s*(\S+)$')
REPO_STATUS_RE = re.compile(r'^Repo-status\s*:\s*(disabled|enabled)$')
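# Illustrative `dnf repolist --all --verbose` lines that the two regexes above
# are meant to match (exact spacing may vary between dnf versions):
#   Repo-id      : crb
#   Repo-status  : enabled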
def get_repo_states(module):
rc, out, err = module.run_command([DNF_BIN, 'repolist', '--all', '--verbose'], check_rc=True)
repos = dict()
last_repo = ''
    for line in out.split('\n'):
m = REPO_ID_RE.match(line)
if m:
if len(last_repo) > 0:
module.fail_json(msg='dnf repolist parse failure: parsed another repo id before next status')
last_repo = m.group(1)
continue
m = REPO_STATUS_RE.match(line)
if m:
if len(last_repo) == 0:
module.fail_json(msg='dnf repolist parse failure: parsed status before repo id')
repos[last_repo] = m.group(1)
last_repo = ''
return repos
def set_repo_states(module, repo_ids, state):
module.run_command([DNF_BIN, 'config-manager', '--set-{0}'.format(state)] + repo_ids, check_rc=True)
def pack_repo_states_for_return(states):
enabled = []
disabled = []
for repo_id in states:
if states[repo_id] == 'enabled':
enabled.append(repo_id)
else:
disabled.append(repo_id)
# Sort for consistent results
enabled.sort()
disabled.sort()
return {'enabled': enabled, 'disabled': disabled}
def main():
module_args = dict(
name=dict(type='list', elements='str', required=False, default=[]),
state=dict(type='str', required=False, choices=['enabled', 'disabled'], default='enabled')
)
result = dict(
changed=False
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if not os.path.exists(DNF_BIN):
module.fail_json(msg="%s was not found" % DNF_BIN)
repo_states = get_repo_states(module)
result['repo_states_pre'] = pack_repo_states_for_return(repo_states)
desired_repo_state = module.params['state']
names = module.params['name']
to_change = []
for repo_id in names:
if repo_id not in repo_states:
module.fail_json(msg="did not find repo with ID '{0}' in dnf repolist --all --verbose".format(repo_id))
if repo_states[repo_id] != desired_repo_state:
to_change.append(repo_id)
result['changed'] = len(to_change) > 0
result['changed_repos'] = to_change
if module.check_mode:
module.exit_json(**result)
if len(to_change) > 0:
set_repo_states(module, to_change, desired_repo_state)
repo_states_post = get_repo_states(module)
result['repo_states_post'] = pack_repo_states_for_return(repo_states_post)
for repo_id in to_change:
if repo_states_post[repo_id] != desired_repo_state:
module.fail_json(msg="dnf config-manager failed to make '{0}' {1}".format(repo_id, desired_repo_state))
module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,355 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Roberto Moreda <moreda@allenta.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: dnf_versionlock
version_added: '4.0.0'
short_description: Locks package versions in C(dnf) based systems
description:
- Locks package versions using the C(versionlock) plugin in C(dnf) based
    systems. This plugin takes a set of names and versions for packages and
    excludes all other versions of those packages. This allows you to, for example,
    protect packages from being updated by newer versions. The state of the
plugin that reflects locking of packages is the C(locklist).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: partial
details:
        - The logic of the C(versionlock) plugin can be confusing in corner cases,
          so please take into account that this module will do its best to
          give a C(check_mode) prediction of what is going to happen. In case of
          doubt, check the documentation of the plugin.
        - Sometimes the module may predict changes in C(check_mode) that do not
          materialize, because C(versionlock) concludes that a matching entry
          already exists in the C(locklist).
diff_mode:
support: none
options:
name:
description:
        - Package name spec to add (possibly as excluded) to or delete from the C(locklist),
          using the format expected by the C(dnf repoquery) command.
- This parameter is mutually exclusive with O(state=clean).
type: list
required: false
elements: str
default: []
raw:
description:
- Do not resolve package name specs to NEVRAs to find specific version
to lock to. Instead the package name specs are used as they are. This
enables locking to not yet available versions of the package.
type: bool
default: false
state:
description:
- Whether to add (V(present) or V(excluded)) to or remove (V(absent) or
V(clean)) from the C(locklist).
      - V(present) will add a package name spec to the C(locklist). If there is an
        installed package that matches, then only that version will be added.
        Otherwise, all available package versions will be added.
- V(excluded) will add a package name spec as excluded to the
C(locklist). It means that packages represented by the package name
spec will be excluded from transaction operations. All available
package versions will be added.
- V(absent) will delete entries in the C(locklist) that match the
package name spec.
- V(clean) will delete all entries in the C(locklist). This option is
mutually exclusive with O(name).
choices: [ 'absent', 'clean', 'excluded', 'present' ]
type: str
default: present
notes:
- In an ideal world, the C(versionlock) plugin would have a dry-run option to
know for sure what is going to happen. So far we have to work with a best
guess as close as possible to the behaviour inferred from its code.
  - For most cases where you want to lock and unlock specific versions of a
    package, this works fairly well.
requirements:
- dnf
- dnf-plugin-versionlock
author:
- Roberto Moreda (@moreda) <moreda@allenta.com>
'''
EXAMPLES = r'''
- name: Prevent installed nginx from being updated
community.general.dnf_versionlock:
name: nginx
state: present
- name: Prevent multiple packages from being updated
community.general.dnf_versionlock:
name:
- nginx
- haproxy
state: present
- name: Remove lock from nginx to be updated again
community.general.dnf_versionlock:
    name: nginx
state: absent
- name: Exclude bind 32:9.11 from installs or updates
community.general.dnf_versionlock:
    name: bind-32:9.11*
state: excluded
- name: Keep bash package in major version 4
community.general.dnf_versionlock:
name: bash-0:4.*
raw: true
state: present
- name: Delete all entries in the locklist of versionlock
community.general.dnf_versionlock:
state: clean
'''
RETURN = r'''
locklist_pre:
description: Locklist before module execution.
returned: success
type: list
elements: str
sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ]
locklist_post:
description: Locklist after module execution.
returned: success and (not check mode or state is clean)
type: list
elements: str
sample: [ 'bash-0:4.4.20-1.el8_4.*' ]
specs_toadd:
description: Package name specs meant to be added by versionlock.
returned: success
type: list
elements: str
sample: [ 'bash' ]
specs_todelete:
description: Package name specs meant to be deleted by versionlock.
returned: success
type: list
elements: str
sample: [ 'bind' ]
'''
from ansible.module_utils.basic import AnsibleModule
import fnmatch
import os
import re
DNF_BIN = "/usr/bin/dnf"
VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf"
# NEVRA regex.
NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-"
r"(?P<release>.+)\.(?P<arch>.+)$")
def do_versionlock(module, command, patterns=None, raw=False):
patterns = [] if not patterns else patterns
raw_parameter = ["--raw"] if raw else []
    # Call dnf versionlock using just one full NEVR package-name-spec each
# time because multiple package-name-spec and globs are not well supported.
#
# This is a workaround for two alleged bugs in the dnf versionlock plugin:
# * Multiple package-name-spec arguments don't lock correctly
# (https://bugzilla.redhat.com/show_bug.cgi?id=2013324).
# * Locking a version of a not installed package disallows locking other
# versions later (https://bugzilla.redhat.com/show_bug.cgi?id=2013332)
#
# NOTE: This is suboptimal in terms of performance if there are more than a
# few package-name-spec patterns to lock, because there is a command
# execution per each. This will improve by changing the strategy once the
# mentioned alleged bugs in the dnf versionlock plugin are fixed.
if patterns:
outs = []
for p in patterns:
rc, out, err = module.run_command(
[DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p],
check_rc=True)
outs.append(out)
out = "\n".join(outs)
else:
rc, out, err = module.run_command(
[DNF_BIN, "-q", "versionlock", command], check_rc=True)
return out
# This is equivalent to the _match function of the versionlock plugin.
def match(entry, pattern):
entry = entry.lstrip('!')
if entry == pattern:
return True
m = NEVRA_RE.match(entry)
if not m:
return False
for name in (
'%s' % m["name"],
'%s.%s' % (m["name"], m["arch"]),
'%s-%s' % (m["name"], m["version"]),
'%s-%s-%s' % (m["name"], m["version"], m["release"]),
'%s-%s:%s' % (m["name"], m["epoch"], m["version"]),
'%s-%s-%s.%s' % (m["name"], m["version"], m["release"], m["arch"]),
'%s-%s:%s-%s' % (m["name"], m["epoch"], m["version"], m["release"]),
'%s:%s-%s-%s.%s' % (m["epoch"], m["name"], m["version"], m["release"],
m["arch"]),
'%s-%s:%s-%s.%s' % (m["name"], m["epoch"], m["version"], m["release"],
m["arch"])
):
if fnmatch.fnmatch(name, pattern):
return True
return False
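# Illustrative behaviour of match() above: a locklist entry of
# "bash-0:5.1.8-4.el9.x86_64" (hypothetical NEVRA) would match the patterns
# "bash", "bash.x86_64", "bash-5.1.8" or the glob "bash-0:5.1*", mirroring how
# the versionlock plugin resolves its locklist entries.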
def get_packages(module, patterns, only_installed=False):
packages_available_map_name_evrs = {}
rc, out, err = module.run_command(
[DNF_BIN, "-q", "repoquery"] +
(["--installed"] if only_installed else []) +
patterns,
check_rc=True)
for p in out.split():
# Extract the NEVRA pattern.
m = NEVRA_RE.match(p)
if not m:
module.fail_json(
msg="failed to parse nevra for %s" % p,
rc=rc, out=out, err=err)
evr = "%s:%s-%s" % (m["epoch"],
m["version"],
m["release"])
packages_available_map_name_evrs.setdefault(m["name"], set())
packages_available_map_name_evrs[m["name"]].add(evr)
return packages_available_map_name_evrs
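# Illustrative parse performed by get_packages() (hypothetical repoquery output
# line): "bash-0:5.1.8-4.el9.x86_64" yields name "bash" and evr "0:5.1.8-4.el9",
# so the returned map would contain {"bash": {"0:5.1.8-4.el9"}}.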
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type="list", elements="str", default=[]),
raw=dict(type="bool", default=False),
state=dict(type="str", default="present",
choices=["present", "absent", "excluded", "clean"]),
),
supports_check_mode=True,
)
patterns = module.params["name"]
raw = module.params["raw"]
state = module.params["state"]
changed = False
msg = ""
# Check module pre-requisites.
if not os.path.exists(DNF_BIN):
module.fail_json(msg="%s was not found" % DNF_BIN)
if not os.path.exists(VERSIONLOCK_CONF):
module.fail_json(msg="plugin versionlock is required")
# Check incompatible options.
if state == "clean" and patterns:
module.fail_json(msg="clean state is incompatible with a name list")
if state != "clean" and not patterns:
module.fail_json(msg="name list is required for %s state" % state)
locklist_pre = do_versionlock(module, "list").split()
specs_toadd = []
specs_todelete = []
if state in ["present", "excluded"]:
if raw:
# Add raw patterns as specs to add.
for p in patterns:
if ((p if state == "present" else "!" + p)
not in locklist_pre):
specs_toadd.append(p)
else:
# Get available packages that match the patterns.
packages_map_name_evrs = get_packages(
module,
patterns)
# Get installed packages that match the patterns.
packages_installed_map_name_evrs = get_packages(
module,
patterns,
only_installed=True)
# Obtain the list of package specs that require an entry in the
# locklist. This list is composed by:
# a) the non-installed packages list with all available
# versions
# b) the installed packages list
packages_map_name_evrs.update(packages_installed_map_name_evrs)
for name in packages_map_name_evrs:
for evr in packages_map_name_evrs[name]:
locklist_entry = "%s-%s.*" % (name, evr)
if (locklist_entry if state == "present"
else "!%s" % locklist_entry) not in locklist_pre:
specs_toadd.append(locklist_entry)
if specs_toadd and not module.check_mode:
cmd = "add" if state == "present" else "exclude"
msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw)
elif state == "absent":
if raw:
# Add raw patterns as specs to delete.
for p in patterns:
if p in locklist_pre:
specs_todelete.append(p)
else:
            # Get patterns that match some line in the locklist.
for p in patterns:
for e in locklist_pre:
if match(e, p):
specs_todelete.append(p)
if specs_todelete and not module.check_mode:
msg = do_versionlock(
module, "delete", patterns=specs_todelete, raw=raw)
elif state == "clean":
specs_todelete = locklist_pre
if specs_todelete and not module.check_mode:
msg = do_versionlock(module, "clear")
if specs_toadd or specs_todelete:
changed = True
response = {
"changed": changed,
"msg": msg,
"locklist_pre": locklist_pre,
"specs_toadd": specs_toadd,
"specs_todelete": specs_todelete
}
if not module.check_mode:
response["locklist_post"] = do_versionlock(module, "list").split()
else:
if state == "clean":
response["locklist_post"] = []
module.exit_json(**response)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,434 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright Ansible Project
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dnsimple
short_description: Interface with dnsimple.com (a DNS hosting service)
description:
- "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
account_email:
description:
- Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) will be looked for.
- "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
- "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0"
type: str
account_api_token:
description:
- Account API token. See O(account_email) for more information.
type: str
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
- If omitted, a list of domains will be returned.
- If domain is present but the domain doesn't exist, it will be created.
type: str
record:
description:
      - Record to add. If blank, a record for the domain will be created. Supports the wildcard (*).
type: str
record_ids:
description:
      - List of record IDs to ensure either exist or do not exist, depending on O(state).
type: list
elements: str
type:
description:
- The type of DNS record to create.
choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ]
type: str
ttl:
description:
- The TTL to give the new record in seconds.
default: 3600
type: int
value:
description:
- Record value.
- Must be specified when trying to ensure a record exists.
type: str
priority:
description:
- Record priority.
type: int
state:
description:
      - Whether the record should exist or not.
choices: [ 'present', 'absent' ]
default: present
type: str
solo:
description:
- Whether the record should be the only one for that record type and record name.
      - Only use when O(state) is set to V(present) on a record.
type: 'bool'
default: false
sandbox:
description:
- Use the DNSimple sandbox environment.
- Requires a dedicated account in the dnsimple sandbox environment.
- Check U(https://developer.dnsimple.com/sandbox/) for more information.
type: 'bool'
default: false
version_added: 3.5.0
requirements:
- "dnsimple >= 2.0.0"
author: "Alex Coomans (@drcapulet)"
'''
EXAMPLES = '''
- name: Authenticate using email and API token and fetch all domains
community.general.dnsimple:
account_email: test@example.com
account_api_token: dummyapitoken
delegate_to: localhost
- name: Delete a domain
community.general.dnsimple:
domain: my.com
state: absent
delegate_to: localhost
- name: Create a test.my.com A record to point to 127.0.0.1
community.general.dnsimple:
domain: my.com
record: test
type: A
value: 127.0.0.1
delegate_to: localhost
register: record
- name: Delete record using record_ids
community.general.dnsimple:
domain: my.com
record_ids: '{{ record["id"] }}'
state: absent
delegate_to: localhost
- name: Create a my.com CNAME record to example.com
community.general.dnsimple:
domain: my.com
record: ''
type: CNAME
value: example.com
state: present
delegate_to: localhost
- name: Change TTL value for a record
community.general.dnsimple:
domain: my.com
record: ''
type: CNAME
value: example.com
ttl: 600
state: present
delegate_to: localhost
- name: Delete the record
community.general.dnsimple:
domain: my.com
record: ''
type: CNAME
value: example.com
state: absent
delegate_to: localhost
'''
RETURN = r"""# """
import traceback
import re
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
class DNSimpleV2():
"""class which uses dnsimple-python >= 2"""
def __init__(self, account_email, account_api_token, sandbox, module):
"""init"""
self.module = module
self.account_email = account_email
self.account_api_token = account_api_token
self.sandbox = sandbox
self.pagination_per_page = 30
self.dnsimple_client()
self.dnsimple_account()
def dnsimple_client(self):
"""creates a dnsimple client object"""
if self.account_email and self.account_api_token:
client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general")
else:
msg = "Option account_email or account_api_token not provided. " \
"Dnsimple authentication with a .dnsimple config file is not " \
"supported with dnsimple-python>=2.0.0"
raise DNSimpleException(msg)
client.identity.whoami()
self.client = client
def dnsimple_account(self):
"""select a dnsimple account. If a user token is used for authentication,
this user must only have access to a single account"""
account = self.client.identity.whoami().data.account
# user supplied a user token instead of account api token
if not account:
accounts = Accounts(self.client).list_accounts().data
if len(accounts) != 1:
msg = "The provided dnsimple token is a user token with multiple accounts." \
"Use an account token or a user token with access to a single account." \
"See https://support.dnsimple.com/articles/api-access-token/"
raise DNSimpleException(msg)
account = accounts[0]
self.account = account
def get_all_domains(self):
"""returns a list of all domains"""
domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id)
return [d.__dict__ for d in domain_list]
def get_domain(self, domain):
"""returns a single domain by name or id"""
try:
dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__
except DNSimpleException as e:
exception_string = str(e.message)
if re.match(r"^Domain .+ not found$", exception_string):
dr = None
else:
raise
return dr
def create_domain(self, domain):
"""create a single domain"""
return self.client.domains.create_domain(self.account.id, domain).data.__dict__
def delete_domain(self, domain):
"""delete a single domain"""
self.client.domains.delete_domain(self.account.id, domain)
def get_records(self, zone, dnsimple_filter=None):
"""return dns resource records which match a specified filter"""
records_list = self._get_paginated_result(self.client.zones.list_records,
account_id=self.account.id,
zone=zone, filter=dnsimple_filter)
return [d.__dict__ for d in records_list]
def delete_record(self, domain, rid):
"""delete a single dns resource record"""
self.client.zones.delete_record(self.account.id, domain, rid)
def update_record(self, domain, rid, ttl=None, priority=None):
"""update a single dns resource record"""
zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority)
result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__
return result
def create_record(self, domain, name, record_type, content, ttl=None, priority=None):
"""create a single dns resource record"""
zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority)
return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__
def _get_paginated_result(self, operation, **options):
"""return all results of a paginated api response"""
records_pagination = operation(per_page=self.pagination_per_page, **options).pagination
result_list = []
for page in range(1, records_pagination.total_pages + 1):
page_data = operation(per_page=self.pagination_per_page, page=page, **options).data
result_list.extend(page_data)
return result_list
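    # Illustrative use of the paginator above, mirroring get_records(): every
    # page of zone records is fetched and concatenated (zone name below is a
    # placeholder):
    #   records = self._get_paginated_result(self.client.zones.list_records,
    #                                        account_id=self.account.id,
    #                                        zone="example.com")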
DNSIMPLE_IMP_ERR = []
HAS_DNSIMPLE = False
try:
# try to import dnsimple >= 2.0.0
from dnsimple import Client, DNSimpleException
from dnsimple.service import Accounts
from dnsimple.version import version as dnsimple_version
from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput
HAS_DNSIMPLE = True
except ImportError:
DNSIMPLE_IMP_ERR.append(traceback.format_exc())
from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
def main():
module = AnsibleModule(
argument_spec=dict(
account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])),
account_api_token=dict(type='str',
no_log=True,
fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])),
domain=dict(type='str'),
record=dict(type='str'),
record_ids=dict(type='list', elements='str'),
type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF',
'URL', 'TXT', 'NS', 'SRV', 'NAPTR',
'PTR', 'AAAA', 'SSHFP', 'HINFO',
'POOL', 'CAA']),
ttl=dict(type='int', default=3600),
value=dict(type='str'),
priority=dict(type='int'),
state=dict(type='str', choices=['present', 'absent'], default='present'),
solo=dict(type='bool', default=False),
sandbox=dict(type='bool', default=False),
),
required_together=[
['record', 'value']
],
supports_check_mode=True,
)
if not HAS_DNSIMPLE:
module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0])
account_email = module.params.get('account_email')
account_api_token = module.params.get('account_api_token')
domain = module.params.get('domain')
record = module.params.get('record')
record_ids = module.params.get('record_ids')
record_type = module.params.get('type')
ttl = module.params.get('ttl')
value = module.params.get('value')
priority = module.params.get('priority')
state = module.params.get('state')
is_solo = module.params.get('solo')
sandbox = module.params.get('sandbox')
DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0]
try:
if DNSIMPLE_MAJOR_VERSION < 2:
module.fail_json(
msg='Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0.')
ds = DNSimpleV2(account_email, account_api_token, sandbox, module)
# Let's figure out what operation we want to do
# No domain, return a list
if not domain:
all_domains = ds.get_all_domains()
module.exit_json(changed=False, result=all_domains)
# Domain & No record
if record is None and not record_ids:
if domain.isdigit():
typed_domain = int(domain)
else:
typed_domain = str(domain)
dr = ds.get_domain(typed_domain)
# domain does not exist
if state == 'present':
if dr:
module.exit_json(changed=False, result=dr)
else:
if module.check_mode:
module.exit_json(changed=True)
else:
response = ds.create_domain(domain)
module.exit_json(changed=True, result=response)
# state is absent
else:
if dr:
if not module.check_mode:
ds.delete_domain(domain)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
# need the not none check since record could be an empty string
if record is not None:
if not record_type:
module.fail_json(msg="Missing the record type")
if not value:
module.fail_json(msg="Missing the record value")
records_list = ds.get_records(domain, dnsimple_filter={'name': record})
rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
if state == 'present':
changed = False
if is_solo:
# delete any records that have the same name and record type
same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type]
if rr:
same_type = [rid for rid in same_type if rid != rr['id']]
if same_type:
if not module.check_mode:
for rid in same_type:
ds.delete_record(domain, rid)
changed = True
if rr:
# check if we need to update
if rr['ttl'] != ttl or rr['priority'] != priority:
if module.check_mode:
module.exit_json(changed=True)
else:
response = ds.update_record(domain, rr['id'], ttl, priority)
module.exit_json(changed=True, result=response)
else:
module.exit_json(changed=changed, result=rr)
else:
# create it
if module.check_mode:
module.exit_json(changed=True)
else:
response = ds.create_record(domain, record, record_type, value, ttl, priority)
module.exit_json(changed=True, result=response)
# state is absent
else:
if rr:
if not module.check_mode:
ds.delete_record(domain, rr['id'])
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
# Make sure these record_ids either all exist or none
if record_ids:
current_records = ds.get_records(domain, dnsimple_filter=None)
current_record_ids = [str(d['id']) for d in current_records]
wanted_record_ids = [str(r) for r in record_ids]
if state == 'present':
difference = list(set(wanted_record_ids) - set(current_record_ids))
if difference:
module.fail_json(msg="Missing the following records: %s" % difference)
else:
module.exit_json(changed=False)
# state is absent
else:
difference = list(set(wanted_record_ids) & set(current_record_ids))
if difference:
if not module.check_mode:
for rid in difference:
ds.delete_record(domain, rid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
except DNSimpleException as e:
if DNSIMPLE_MAJOR_VERSION > 1:
module.fail_json(msg="DNSimple exception: %s" % e.message)
else:
module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message']))
module.fail_json(msg="Unknown what you wanted me to do")
if __name__ == '__main__':
main()

View File

@ -0,0 +1,331 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Edward Hilgendorf, <edward@hilgendorf.me>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: dnsimple_info
short_description: Pull basic info from DNSimple API
version_added: "4.2.0"
description: Retrieve existing records and domains from DNSimple API.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
options:
name:
description:
- The domain name to retrieve info from.
- Will return all associated records for this domain if specified.
- If not specified, will return all domains associated with the account ID.
type: str
account_id:
description: The account ID to query.
required: true
type: str
api_key:
description: The API key to use.
required: true
type: str
record:
description:
- The record to find.
- If specified, only this record will be returned instead of all records.
required: false
type: str
sandbox:
      description: Whether or not to use the sandbox environment.
required: false
default: false
type: bool
author:
- Edward Hilgendorf (@edhilgendorf)
'''
EXAMPLES = r'''
- name: Get all domains from an account
community.general.dnsimple_info:
account_id: "1234"
api_key: "1234"
- name: Get all records from a domain
community.general.dnsimple_info:
name: "example.com"
account_id: "1234"
api_key: "1234"
- name: Get all info from a matching record
community.general.dnsimple_info:
name: "example.com"
record: "subdomain"
account_id: "1234"
api_key: "1234"
'''
RETURN = r'''
dnsimple_domain_info:
description: Returns a list of dictionaries of all domains associated with the supplied account ID.
type: list
elements: dict
returned: success when O(name) is not specified
sample:
- account_id: 1234
created_at: '2021-10-16T21:25:42Z'
id: 123456
last_transferred_at:
name: example.com
reverse: false
secondary: false
updated_at: '2021-11-10T20:22:50Z'
contains:
account_id:
description: The account ID.
type: int
created_at:
description: When the domain entry was created.
type: str
id:
description: ID of the entry.
type: int
last_transferred_at:
description: Date the domain was transferred, or empty if not.
type: str
name:
description: Name of the record.
type: str
reverse:
description: Whether or not it is a reverse zone record.
type: bool
updated_at:
description: When the domain entry was updated.
type: str
dnsimple_records_info:
description: Returns a list of dictionaries with all records for the domain supplied.
type: list
elements: dict
returned: success when O(name) is specified, but O(record) is not
sample:
- content: ns1.dnsimple.com admin.dnsimple.com
created_at: '2021-10-16T19:07:34Z'
id: 12345
name: 'catheadbiscuit'
parent_id: null
priority: null
regions:
- global
system_record: true
ttl: 3600
type: SOA
updated_at: '2021-11-15T23:55:51Z'
zone_id: example.com
contains:
content:
description: Content of the returned record.
type: str
created_at:
description: When the domain entry was created.
type: str
id:
description: ID of the entry.
type: int
name:
description: Name of the record.
type: str
parent_id:
description: Parent record or null.
type: int
priority:
description: Priority setting of the record.
type: str
regions:
description: List of regions where the record is available.
type: list
system_record:
description: Whether or not it is a system record.
type: bool
ttl:
description: Record TTL.
type: int
type:
description: Record type.
type: str
updated_at:
description: When the domain entry was updated.
type: str
zone_id:
description: ID of the zone that the record is associated with.
type: str
dnsimple_record_info:
description: Returns a list of dictionaries that match the record supplied.
returned: success when O(name) and O(record) are specified
type: list
elements: dict
sample:
- content: 1.2.3.4
created_at: '2021-11-15T23:55:51Z'
id: 123456
name: catheadbiscuit
parent_id: null
priority: null
regions:
- global
system_record: false
ttl: 3600
type: A
updated_at: '2021-11-15T23:55:51Z'
zone_id: example.com
contains:
content:
description: Content of the returned record.
type: str
created_at:
description: When the domain entry was created.
type: str
id:
description: ID of the entry.
type: int
name:
description: Name of the record.
type: str
parent_id:
description: Parent record or null.
type: int
priority:
description: Priority setting of the record.
type: str
regions:
description: List of regions where the record is available.
type: list
system_record:
description: Whether or not it is a system record.
type: bool
ttl:
description: Record TTL.
type: int
type:
description: Record type.
type: str
updated_at:
description: When the domain entry was updated.
type: str
zone_id:
description: ID of the zone that the record is associated with.
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils import deps
with deps.declare("requests"):
from requests import Request, Session
def build_url(account, key, is_sandbox):
headers = {'Accept': 'application/json',
'Authorization': 'Bearer {0}'.format(key)}
sandbox = '.sandbox' if is_sandbox else ''
url = 'https://api{sandbox}.dnsimple.com/v2/{account}'.format(sandbox=sandbox, account=account)
req = Request(url=url, headers=headers)
prepped_request = req.prepare()
return prepped_request
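# build_url() returns a requests.PreparedRequest; the *_info helpers below
# mutate its .url and .method for the specific endpoint before sending it
# through a requests.Session in iterate_data().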
def iterate_data(module, request_object):
base_url = request_object.url
response = Session().send(request_object)
if 'pagination' not in response.json():
        module.fail_json(msg='API call failed; check account_id, api_key, and sandbox values')
data = response.json()["data"]
total_pages = response.json()["pagination"]["total_pages"]
page = 1
while page < total_pages:
page = page + 1
request_object.url = '{url}&page={page}'.format(url=base_url, page=page)
new_results = Session().send(request_object)
data = data + new_results.json()['data']
return data
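# Note on the paginator above: it assumes the prepared URL already carries a
# query string (every caller appends "?..."), so later pages can be requested
# by appending "&page=N" to the base URL.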
def record_info(dnsimple_mod, req_obj):
req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET'
return iterate_data(dnsimple_mod, req_obj)
def domain_info(dnsimple_mod, req_obj):
req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET'
return iterate_data(dnsimple_mod, req_obj)
def account_info(dnsimple_mod, req_obj):
req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET'
return iterate_data(dnsimple_mod, req_obj)
def main():
# define available arguments/parameters a user can pass to the module
fields = {
"account_id": {"required": True, "type": "str"},
"api_key": {"required": True, "type": "str", "no_log": True},
"name": {"required": False, "type": "str"},
"record": {"required": False, "type": "str"},
"sandbox": {"required": False, "type": "bool", "default": False}
}
result = {
'changed': False
}
module = AnsibleModule(
argument_spec=fields,
supports_check_mode=True
)
params = module.params
req = build_url(params['account_id'],
params['api_key'],
params['sandbox'])
deps.validate(module)
# At minimum we need account and key
if params['account_id'] and params['api_key']:
# If we have a record return info on that record
if params['name'] and params['record']:
result['dnsimple_record_info'] = record_info(module, req)
module.exit_json(**result)
# If we have the account only and domain, return records for the domain
elif params['name']:
result['dnsimple_records_info'] = domain_info(module, req)
module.exit_json(**result)
# If we have the account only, return domains
else:
result['dnsimple_domain_info'] = account_info(module, req)
module.exit_json(**result)
else:
module.fail_json(msg="Need at least account_id and api_key")
if __name__ == '__main__':
main()

View File

@ -0,0 +1,724 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dnsmadeeasy
short_description: Interface with dnsmadeeasy.com (a DNS hosting service)
description:
- >
Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
account_key:
description:
- Account API Key.
required: true
type: str
account_secret:
description:
- Account Secret Key.
required: true
type: str
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
        resolution.
required: true
type: str
sandbox:
description:
- Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
type: bool
default: false
record_name:
description:
      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
of the state argument.
type: str
record_type:
description:
- Record type.
choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
type: str
record_value:
description:
- >
Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
      SRV: <priority> <weight> <port> <target name>, TXT: <text value>
- >
      If record_value is not specified, no changes will be made and the record will be returned in 'result'
(in other words, this module can be used to fetch a record's current id, type, and ttl)
type: str
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
default: 1800
type: int
state:
description:
      - Whether the record should exist or not.
required: true
choices: [ 'present', 'absent' ]
type: str
validate_certs:
description:
- If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
monitor:
description:
- If V(true), add or change the monitor. This is applicable only for A records.
type: bool
default: false
systemDescription:
description:
- Description used by the monitor.
default: ''
type: str
maxEmails:
description:
- Number of emails sent to the contact list by the monitor.
default: 1
type: int
protocol:
description:
- Protocol used by the monitor.
default: 'HTTP'
choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
type: str
port:
description:
- Port used by the monitor.
default: 80
type: int
sensitivity:
description:
      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
default: 'Medium'
choices: ['Low', 'Medium', 'High']
type: str
contactList:
description:
- Name or id of the contact list that the monitor will notify.
- The default V('') means the Account Owner.
type: str
httpFqdn:
description:
- The fully qualified domain name used by the monitor.
type: str
httpFile:
description:
      - The file at the FQDN that the monitor queries for HTTP or HTTPS.
type: str
httpQueryString:
description:
- The string in the httpFile that the monitor queries for HTTP or HTTPS.
type: str
failover:
description:
- If V(true), add or change the failover. This is applicable only for A records.
type: bool
default: false
autoFailover:
description:
      - If V(true), fallback to the primary IP address is manual after a failover.
      - If V(false), fallback to the primary IP address is automatic after a failover.
type: bool
default: false
ip1:
description:
- Primary IP address for the failover.
- Required if adding or changing the monitor or failover.
type: str
ip2:
description:
- Secondary IP address for the failover.
- Required if adding or changing the failover.
type: str
ip3:
description:
- Tertiary IP address for the failover.
type: str
ip4:
description:
- Quaternary IP address for the failover.
type: str
ip5:
description:
- Quinary IP address for the failover.
type: str
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
seconds of actual time by using NTP.
  - This module returns record(s) and monitor(s) in the "result" element when O(state) is set to V(present).
    These values can be registered and used in your playbooks.
- Only A records can have a monitor or failover.
- To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
- To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
- The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
EXAMPLES = '''
- name: Fetch my.com domain records
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
register: response
- name: Create a record
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
- name: Update the previously created record
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_value: 192.0.2.23
- name: Fetch a specific record
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
register: response
- name: Delete a record
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
record_type: A
state: absent
record_name: test
- name: Add a failover
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: true
ip1: 127.0.0.2
ip2: 127.0.0.3
- name: Add a failover
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: true
ip1: 127.0.0.2
ip2: 127.0.0.3
ip3: 127.0.0.4
ip4: 127.0.0.5
ip5: 127.0.0.6
- name: Add a monitor
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: true
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: my contact list
- name: Add a monitor with http options
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: true
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: 1174 # contact list id
httpFqdn: http://my.com
httpFile: example
httpQueryString: some string
- name: Add a monitor and a failover
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: true
ip1: 127.0.0.2
ip2: 127.0.0.3
monitor: true
protocol: HTTPS
port: 443
maxEmails: 1
systemDescription: monitoring my.com status
contactList: emergencycontacts
- name: Remove a failover
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: false
- name: Remove a monitor
community.general.dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: false
'''
# ============================================
# DNSMadeEasy module specific support methods.
#
import json
import hashlib
import hmac
import locale
from time import strftime, gmtime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six import string_types
class DME2(object):
def __init__(self, apikey, secret, domain, sandbox, module):
self.module = module
self.api = apikey
self.secret = secret
if sandbox:
self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
else:
self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
self.domain = str(domain)
self.domain_map = None # ["domain_name"] => ID
self.record_map = None # ["record_name"] => ID
self.records = None # ["record_ID"] => <record>
self.all_records = None
self.contactList_map = None # ["contactList_name"] => ID
# Lookup the domain ID if passed as a domain name vs. ID
if not self.domain.isdigit():
self.domain = self.getDomainByName(self.domain)['id']
self.record_url = 'dns/managed/' + str(self.domain) + '/records'
self.monitor_url = 'monitor'
self.contactList_url = 'contactList'
def _headers(self):
currTime = self._get_date()
hashstring = self._create_hash(currTime)
headers = {'x-dnsme-apiKey': self.api,
'x-dnsme-hmac': hashstring,
'x-dnsme-requestDate': currTime,
'content-type': 'application/json'}
return headers
def _get_date(self):
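        # Pin LC_TIME to the POSIX locale so day and month names come out in
        # English, as expected by the x-dnsme-requestDate header format.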
locale.setlocale(locale.LC_TIME, 'C')
return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
def _create_hash(self, rightnow):
return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
def query(self, resource, method, data=None):
url = self.baseurl + resource
if data and not isinstance(data, string_types):
data = urlencode(data)
response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
if info['status'] not in (200, 201, 204):
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return json.load(response)
except Exception:
return {}
def getDomain(self, domain_id):
if not self.domain_map:
self._instMap('domain')
return self.domains.get(domain_id, False)
def getDomainByName(self, domain_name):
if not self.domain_map:
self._instMap('domain')
return self.getDomain(self.domain_map.get(domain_name, 0))
def getDomains(self):
return self.query('dns/managed', 'GET')['data']
def getRecord(self, record_id):
if not self.record_map:
self._instMap('record')
return self.records.get(record_id, False)
# Try to find a single record matching this one.
# How we do this depends on the type of record. For instance, there
# can be several MX records for a single record_name while there can
# only be a single CNAME for a particular record_name. Note also that
# there can be several records with different types for a single name.
def getMatchingRecord(self, record_name, record_type, record_value):
# Get all the records if not already cached
if not self.all_records:
self.all_records = self.getRecords()
if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
for result in self.all_records:
if result['name'] == record_name and result['type'] == record_type:
return result
return False
elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
for result in self.all_records:
if record_type == "MX":
value = record_value.split(" ")[1]
# Note that TXT records are surrounded by quotes in the API response.
elif record_type == "TXT":
value = '"{0}"'.format(record_value)
elif record_type == "SRV":
value = record_value.split(" ")[3]
else:
value = record_value
if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
return result
return False
else:
raise Exception('record_type not yet supported')
def getRecords(self):
return self.query(self.record_url, 'GET')['data']
def _instMap(self, type):
# @TODO cache this call so it's executed only once per ansible execution
map = {}
results = {}
# iterate over e.g. self.getDomains() || self.getRecords()
for result in getattr(self, 'get' + type.title() + 's')():
map[result['name']] = result['id']
results[result['id']] = result
# e.g. self.domain_map || self.record_map
setattr(self, type + '_map', map)
setattr(self, type + 's', results) # e.g. self.domains || self.records
def prepareRecord(self, data):
return json.dumps(data, separators=(',', ':'))
def createRecord(self, data):
# @TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url, 'POST', data)
def updateRecord(self, record_id, data):
# @TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
def deleteRecord(self, record_id):
# @TODO remove record from the cache when implemented
return self.query(self.record_url + '/' + str(record_id), 'DELETE')
def getMonitor(self, record_id):
return self.query(self.monitor_url + '/' + str(record_id), 'GET')
def updateMonitor(self, record_id, data):
return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
def prepareMonitor(self, data):
return json.dumps(data, separators=(',', ':'))
def getContactList(self, contact_list_id):
if not self.contactList_map:
self._instMap('contactList')
return self.contactLists.get(contact_list_id, False)
def getContactlists(self):
return self.query(self.contactList_url, 'GET')['data']
def getContactListByName(self, name):
if not self.contactList_map:
self._instMap('contactList')
return self.getContactList(self.contactList_map.get(name, 0))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_key=dict(required=True, no_log=True),
account_secret=dict(required=True, no_log=True),
domain=dict(required=True),
sandbox=dict(default=False, type='bool'),
state=dict(required=True, choices=['present', 'absent']),
record_name=dict(required=False),
record_type=dict(required=False, choices=[
'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
record_value=dict(required=False),
record_ttl=dict(required=False, default=1800, type='int'),
monitor=dict(default=False, type='bool'),
systemDescription=dict(default=''),
maxEmails=dict(default=1, type='int'),
protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
port=dict(default=80, type='int'),
sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
contactList=dict(default=None),
httpFqdn=dict(required=False),
httpFile=dict(required=False),
httpQueryString=dict(required=False),
failover=dict(default=False, type='bool'),
autoFailover=dict(default=False, type='bool'),
ip1=dict(required=False),
ip2=dict(required=False),
ip3=dict(required=False),
ip4=dict(required=False),
ip5=dict(required=False),
validate_certs=dict(default=True, type='bool'),
),
required_together=[
['record_value', 'record_ttl', 'record_type']
],
required_if=[
['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
]
)
protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
sensitivities = dict(Low=8, Medium=5, High=3)
DME = DME2(module.params["account_key"], module.params[
"account_secret"], module.params["domain"], module.params["sandbox"], module)
state = module.params["state"]
record_name = module.params["record_name"]
record_type = module.params["record_type"]
record_value = module.params["record_value"]
# Follow Keyword Controlled Behavior
if record_name is None:
domain_records = DME.getRecords()
if not domain_records:
module.fail_json(
msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
module.exit_json(changed=False, result=domain_records)
# Fetch existing record + Build new one
current_record = DME.getMatchingRecord(record_name, record_type, record_value)
new_record = {'name': record_name}
for i in ["record_value", "record_type", "record_ttl"]:
if not module.params[i] is None:
new_record[i[len("record_"):]] = module.params[i]
# Special handling for mx record
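    # record_value is "<mxLevel> <hostname>", for example "10 mail.example.com"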
if new_record["type"] == "MX":
new_record["mxLevel"] = new_record["value"].split(" ")[0]
new_record["value"] = new_record["value"].split(" ")[1]
# Special handling for SRV records
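    # record_value is "<priority> <weight> <port> <target>", for example "0 5 5060 sip.example.com"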
if new_record["type"] == "SRV":
new_record["priority"] = new_record["value"].split(" ")[0]
new_record["weight"] = new_record["value"].split(" ")[1]
new_record["port"] = new_record["value"].split(" ")[2]
new_record["value"] = new_record["value"].split(" ")[3]
# Fetch existing monitor if the A record indicates it should exist and build the new monitor
current_monitor = dict()
new_monitor = dict()
if current_record and current_record['type'] == 'A' and current_record.get('monitor'):
current_monitor = DME.getMonitor(current_record['id'])
# Build the new monitor
for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
if module.params[i] is not None:
if i == 'protocol':
# The API requires protocol to be a numeric in the range 1-6
new_monitor['protocolId'] = protocols[module.params[i]]
elif i == 'sensitivity':
# The API requires sensitivity to be a numeric of 8, 5, or 3
new_monitor[i] = sensitivities[module.params[i]]
elif i == 'contactList':
# The module accepts either the name or the id of the contact list
contact_list_id = module.params[i]
if not contact_list_id.isdigit() and contact_list_id != '':
contact_list = DME.getContactListByName(contact_list_id)
if not contact_list:
module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
contact_list_id = contact_list.get('id', '')
new_monitor['contactListId'] = contact_list_id
else:
# The module option names match the API field names
new_monitor[i] = module.params[i]
# Compare new record against existing one
record_changed = False
if current_record:
for i in new_record:
# Remove leading and trailing quote character from values because TXT records
# are surrounded by quotes.
if str(current_record[i]).strip('"') != str(new_record[i]):
record_changed = True
new_record['id'] = str(current_record['id'])
monitor_changed = False
if current_monitor:
for i in new_monitor:
if str(current_monitor.get(i)) != str(new_monitor[i]):
monitor_changed = True
# Follow Keyword Controlled Behavior
if state == 'present':
# return the record if no value is specified
if "value" not in new_record:
if not current_record:
module.fail_json(
msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
# create record and monitor as the record does not exist
if not current_record:
record = DME.createRecord(DME.prepareRecord(new_record))
if new_monitor.get('monitor') and record_type == "A":
monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
else:
module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))
# update the record
updated = False
if record_changed:
DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
updated = True
if monitor_changed:
DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
updated = True
if updated:
module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
# return the record (no changes)
module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
elif state == 'absent':
changed = False
# delete the record (and the monitor/failover) if it exists
if current_record:
DME.deleteRecord(current_record['id'])
module.exit_json(changed=True)
# record does not exist, return w/o change.
module.exit_json(changed=changed)
else:
module.fail_json(
msg="'%s' is an unknown value for the state argument" % state)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,369 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020, Yann Amar <quidame@poivron.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: dpkg_divert
short_description: Override a Debian package's version of a file
version_added: '0.2.0'
author:
- quidame (@quidame)
description:
  - A diversion tells C(dpkg) that only a given package
    (or the local administrator) is allowed to install a file at a given
    location. Other packages shipping their own version of this file are
    forced to O(divert) it, that is, to install it at another location. This
    makes it possible to keep local changes to a file provided by a Debian
    package, by preventing it from being overwritten on package upgrade.
  - This module manages diversions of Debian package files using the
    C(dpkg-divert) command-line tool. It can either create or remove a
diversion for a given file, but also update an existing diversion
to modify its O(holder) and/or its O(divert) location.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
options:
path:
description:
- The original and absolute path of the file to be diverted or
undiverted. This path is unique, i.e. it is not possible to get
two diversions for the same O(path).
required: true
type: path
state:
description:
- When O(state=absent), remove the diversion of the specified
O(path); when O(state=present), create the diversion if it does
not exist, or update its package O(holder) or O(divert) location,
if it already exists.
type: str
default: present
choices: [absent, present]
holder:
description:
- The name of the package whose copy of file is not diverted, also
known as the diversion holder or the package the diversion belongs
to.
- The actual package does not have to be installed or even to exist
        for its name to be valid. If not specified, the diversion is held
        by 'LOCAL', the name dpkg reserves for local diversions.
- This parameter is ignored when O(state=absent).
type: str
divert:
description:
- The location where the versions of file will be diverted.
- Default is to add suffix C(.distrib) to the file path.
- This parameter is ignored when O(state=absent).
type: path
rename:
description:
- Actually move the file aside (when O(state=present)) or back (when
O(state=absent)), but only when changing the state of the diversion.
This parameter has no effect when attempting to add a diversion that
        already exists or when removing a nonexistent one.
- Unless O(force=true), renaming fails if the destination file already
exists (this lock being a dpkg-divert feature, and bypassing it being
a module feature).
type: bool
default: false
force:
description:
- When O(rename=true) and O(force=true), renaming is performed even if
the target of the renaming exists, i.e. the existing contents of the
file at this location will be lost.
- This parameter is ignored when O(rename=false).
type: bool
default: false
requirements:
- dpkg-divert >= 1.15.0 (Debian family)
'''
EXAMPLES = r'''
- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
community.general.dpkg_divert:
path: /usr/bin/busybox
- name: Divert /usr/bin/busybox by package 'branding'
community.general.dpkg_divert:
path: /usr/bin/busybox
holder: branding
- name: Divert and rename busybox to busybox.dpkg-divert
community.general.dpkg_divert:
path: /usr/bin/busybox
divert: /usr/bin/busybox.dpkg-divert
rename: true
- name: Remove the busybox diversion and move the diverted file back
community.general.dpkg_divert:
path: /usr/bin/busybox
state: absent
rename: true
force: true
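# An illustrative sketch only: with state=present, the holder of an existing
# diversion can be updated in place ('busybox-static' is a hypothetical holder).
- name: Transfer the busybox diversion to the busybox-static package
  community.general.dpkg_divert:
    path: /usr/bin/busybox
    holder: busybox-static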
'''
RETURN = r'''
commands:
description: The dpkg-divert commands ran internally by the module.
type: list
returned: on_success
elements: str
sample: "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc"
messages:
description: The dpkg-divert relevant messages (stdout or stderr).
type: list
returned: on_success
elements: str
sample: "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'"
diversion:
description: The status of the diversion after task execution.
type: dict
returned: always
contains:
divert:
description: The location of the diverted file.
type: str
holder:
description: The package holding the diversion.
type: str
path:
description: The path of the file to divert/undivert.
type: str
state:
description: The state of the diversion.
type: str
sample:
{
"divert": "/etc/foobarrc.distrib",
"holder": "LOCAL",
"path": "/etc/foobarrc",
"state": "present"
}
'''
import re
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
def diversion_state(module, command, path):
diversion = dict(path=path, state='absent', divert=None, holder=None)
rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
if out:
diversion['state'] = 'present'
diversion['holder'] = out.rstrip()
rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
diversion['divert'] = out.rstrip()
return diversion
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, type='path'),
state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
holder=dict(required=False, type='str'),
divert=dict(required=False, type='path'),
rename=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
path = module.params['path']
state = module.params['state']
holder = module.params['holder']
divert = module.params['divert']
rename = module.params['rename']
force = module.params['force']
diversion_wanted = dict(path=path, state=state)
changed = False
DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
MAINCOMMAND = [DPKG_DIVERT]
# Option --listpackage is needed and comes with 1.15.0
rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
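    # The first line of `dpkg-divert --version` contains a dotted version
    # token (for example "Debian dpkg-divert version 1.19.7"); extract it.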
[current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
if LooseVersion(current_version) < LooseVersion("1.15.0"):
module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
b_path = to_bytes(path, errors='surrogate_or_strict')
path_exists = os.path.exists(b_path)
# Used for things not doable with a single dpkg-divert command (as forced
# renaming of files, and diversion's 'holder' or 'divert' updates).
target_exists = False
truename_exists = False
diversion_before = diversion_state(module, DPKG_DIVERT, path)
if diversion_before['state'] == 'present':
b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
truename_exists = os.path.exists(b_divert)
# Append options as requested in the task parameters, but ignore some of
# them when removing the diversion.
if rename:
MAINCOMMAND.append('--rename')
elif no_rename_is_supported:
MAINCOMMAND.append('--no-rename')
if state == 'present':
if holder and holder != 'LOCAL':
MAINCOMMAND.extend(['--package', holder])
diversion_wanted['holder'] = holder
else:
MAINCOMMAND.append('--local')
diversion_wanted['holder'] = 'LOCAL'
if divert:
MAINCOMMAND.extend(['--divert', divert])
target = divert
else:
target = '%s.distrib' % path
MAINCOMMAND.extend(['--add', path])
diversion_wanted['divert'] = target
b_target = to_bytes(target, errors='surrogate_or_strict')
target_exists = os.path.exists(b_target)
else:
MAINCOMMAND.extend(['--remove', path])
diversion_wanted['divert'] = None
diversion_wanted['holder'] = None
# Start to populate the returned objects.
diversion = diversion_before.copy()
maincommand = ' '.join(MAINCOMMAND)
commands = [maincommand]
if module.check_mode or diversion_wanted == diversion_before:
MAINCOMMAND.insert(1, '--test')
diversion_after = diversion_wanted
# Just try and see
rc, stdout, stderr = module.run_command(MAINCOMMAND)
if rc == 0:
messages = [stdout.rstrip()]
# else... cases of failure with dpkg-divert are:
# - The diversion does not belong to the same package (or LOCAL)
# - The divert filename is not the same (e.g. path.distrib != path.divert)
# - The renaming is forbidden by dpkg-divert (i.e. both the file and the
# diverted file exist)
elif state != diversion_before['state']:
# There should be no case with 'divert' and 'holder' when creating the
# diversion from none, and they're ignored when removing the diversion.
# So this is all about renaming...
if rename and path_exists and (
(state == 'absent' and truename_exists) or
(state == 'present' and target_exists)):
if not force:
msg = "Set 'force' param to True to force renaming of files."
module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
stderr=stderr, stdout=stdout, diversion=diversion)
else:
msg = "Unexpected error while changing state of the diversion."
module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
stderr=stderr, stdout=stdout, diversion=diversion)
to_remove = path
if state == 'present':
to_remove = target
if not module.check_mode:
try:
b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
os.unlink(b_remove)
except OSError as e:
msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
stderr=stderr, stdout=stdout, diversion=diversion)
rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
messages = [stdout.rstrip()]
# The situation is that we want to modify the settings (holder or divert)
# of an existing diversion. dpkg-divert does not handle this, and we have
# to remove the existing diversion first, and then set a new one.
else:
RMDIVERSION = [DPKG_DIVERT, '--remove', path]
if no_rename_is_supported:
RMDIVERSION.insert(1, '--no-rename')
rmdiversion = ' '.join(RMDIVERSION)
if module.check_mode:
RMDIVERSION.insert(1, '--test')
if rename:
MAINCOMMAND.remove('--rename')
if no_rename_is_supported:
MAINCOMMAND.insert(1, '--no-rename')
maincommand = ' '.join(MAINCOMMAND)
commands = [rmdiversion, maincommand]
rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)
if module.check_mode:
messages = [rmdout.rstrip(), 'Running in check mode']
else:
rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
messages = [rmdout.rstrip(), stdout.rstrip()]
# Avoid if possible to orphan files (i.e. to dereference them in diversion
# database but let them in place), but do not make renaming issues fatal.
# BTW, this module is not about state of files involved in the diversion.
old = diversion_before['divert']
new = diversion_wanted['divert']
if new != old:
b_old = to_bytes(old, errors='surrogate_or_strict')
b_new = to_bytes(new, errors='surrogate_or_strict')
if os.path.exists(b_old) and not os.path.exists(b_new):
try:
os.rename(b_old, b_new)
            except OSError:
                pass
if not module.check_mode:
diversion_after = diversion_state(module, DPKG_DIVERT, path)
diversion = diversion_after.copy()
diff = dict()
if module._diff:
diff['before'] = diversion_before
diff['after'] = diversion_after
if diversion_after != diversion_before:
changed = True
if diversion_after == diversion_wanted:
module.exit_json(changed=changed, diversion=diversion,
commands=commands, messages=messages, diff=diff)
else:
msg = "Unexpected error: see stdout and stderr for details."
module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
stderr=stderr, stdout=stdout, diversion=diversion)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,206 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Matt Wright <matt@nobien.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: easy_install
short_description: Installs Python libraries
description:
- Installs Python libraries, optionally in a C(virtualenv)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
type: str
description:
- A Python library name.
required: true
virtualenv:
type: str
description:
- An optional O(virtualenv) directory path to install into. If the
O(virtualenv) does not exist, it is created automatically.
virtualenv_site_packages:
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
        changed on an already existing virtual environment, it will not
        have any effect; the environment must be deleted and newly
        created.
type: bool
default: false
virtualenv_command:
type: str
description:
- The command to create the virtual environment with. For example
V(pyvenv), V(virtualenv), V(virtualenv2).
default: virtualenv
executable:
type: str
description:
- The explicit executable or a pathname to the executable to be used to
run easy_install for a specific version of Python installed in the
system. For example V(easy_install-3.3), if there are both Python 2.7
and 3.3 installations in the system and you want to run easy_install
for the Python 3.3 installation.
default: easy_install
state:
type: str
description:
- The desired state of the library. V(latest) ensures that the latest version is installed.
choices: [present, latest]
default: present
notes:
- Please note that the C(easy_install) module can only install Python
libraries. Thus this module is not able to remove libraries. It is
generally recommended to use the M(ansible.builtin.pip) module which you can first install
using M(community.general.easy_install).
- Also note that C(virtualenv) must be installed on the remote host if the
O(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
- name: Install or update pip
community.general.easy_install:
name: pip
state: latest
- name: Install Bottle into the specified virtualenv
community.general.easy_install:
name: bottle
virtualenv: /webapps/myapp/venv
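# An illustrative sketch only: the executable option pins the interpreter by
# naming its easy_install binary (assumes easy_install-3.3 exists on the target).
- name: Install Flask using the Python 3.3 easy_install
  community.general.easy_install:
    name: flask
    executable: easy_install-3.3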
'''
import os
import os.path
import tempfile
from ansible.module_utils.basic import AnsibleModule
def install_package(module, name, easy_install, executable_arguments):
cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
rc, out, err = module.run_command(cmd)
return rc, out, err
def _is_package_installed(module, name, easy_install, executable_arguments):
# Copy and add to the arguments
executable_arguments = executable_arguments[:]
executable_arguments.append('--dry-run')
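    # easy_install prints "Downloading ..." during a dry run only when the
    # package is not already installed, so its absence implies presence.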
rc, out, err = install_package(module, name, easy_install, executable_arguments)
if rc:
module.fail_json(msg=err)
return 'Downloading' not in out
def _get_easy_install(module, env=None, executable=None):
candidate_easy_inst_basenames = ['easy_install']
easy_install = None
if executable is not None:
if os.path.isabs(executable):
easy_install = executable
else:
candidate_easy_inst_basenames.insert(0, executable)
if easy_install is None:
if env is None:
opt_dirs = []
else:
# Try easy_install with the virtualenv directory first.
opt_dirs = ['%s/bin' % env]
for basename in candidate_easy_inst_basenames:
easy_install = module.get_bin_path(basename, False, opt_dirs)
if easy_install is not None:
break
# easy_install should have been found by now. The final call to
# get_bin_path will trigger fail_json.
if easy_install is None:
basename = candidate_easy_inst_basenames[0]
easy_install = module.get_bin_path(basename, True, opt_dirs)
return easy_install
def main():
arg_spec = dict(
name=dict(required=True),
state=dict(required=False,
default='present',
choices=['present', 'latest'],
type='str'),
virtualenv=dict(default=None, required=False),
virtualenv_site_packages=dict(default=False, type='bool'),
virtualenv_command=dict(default='virtualenv', required=False),
executable=dict(default='easy_install', required=False),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
env = module.params['virtualenv']
executable = module.params['executable']
site_packages = module.params['virtualenv_site_packages']
virtualenv_command = module.params['virtualenv_command']
executable_arguments = []
if module.params['state'] == 'latest':
executable_arguments.append('--upgrade')
rc = 0
err = ''
out = ''
if env:
virtualenv = module.get_bin_path(virtualenv_command, True)
if not os.path.exists(os.path.join(env, 'bin', 'activate')):
if module.check_mode:
module.exit_json(changed=True)
command = '%s %s' % (virtualenv, env)
if site_packages:
command += ' --system-site-packages'
cwd = tempfile.gettempdir()
rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
rc += rc_venv
out += out_venv
err += err_venv
easy_install = _get_easy_install(module, env, executable)
cmd = None
changed = False
installed = _is_package_installed(module, name, easy_install, executable_arguments)
if not installed:
if module.check_mode:
module.exit_json(changed=True)
rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
rc += rc_easy_inst
out += out_easy_inst
err += err_easy_inst
changed = True
if rc != 0:
module.fail_json(msg=err, cmd=cmd)
module.exit_json(changed=changed, binary=easy_install,
name=name, virtualenv=env)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,214 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ejabberd_user
author: "Peter Sprygada (@privateip)"
short_description: Manages users for ejabberd servers
requirements:
- ejabberd with mod_admin_extra
description:
- This module provides user management for ejabberd servers
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
username:
type: str
description:
- the name of the user to manage
required: true
host:
type: str
description:
- the ejabberd host associated with this username
required: true
password:
type: str
description:
- the password to assign to the username
required: false
logging:
description:
- enables or disables the local syslog facility for this module
required: false
default: false
type: bool
state:
type: str
description:
- describe the desired state of the user to be managed
required: false
default: 'present'
choices: [ 'present', 'absent' ]
notes:
  - Password parameter is required only when state == present.
- Passwords must be stored in clear text for this release
- The ejabberd configuration file must include mod_admin_extra as a module.
'''
EXAMPLES = '''
# Example playbook entries using the ejabberd_user module to manage users state.
- name: Create a user if it does not exist
community.general.ejabberd_user:
username: test
host: server
password: password
- name: Delete a user if it exists
community.general.ejabberd_user:
username: test
host: server
state: absent
'''
import syslog
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
class EjabberdUser(object):
""" This object represents a user resource for an ejabberd server. The
object manages user creation and deletion using ejabberdctl. The following
commands are currently supported:
* ejabberdctl register
* ejabberdctl unregister
"""
def __init__(self, module):
self.module = module
self.logging = module.params.get('logging')
self.state = module.params.get('state')
self.host = module.params.get('host')
self.user = module.params.get('username')
self.pwd = module.params.get('password')
self.runner = CmdRunner(
module,
command="ejabberdctl",
arg_formats=dict(
cmd=cmd_runner_fmt.as_list(),
host=cmd_runner_fmt.as_list(),
user=cmd_runner_fmt.as_list(),
pwd=cmd_runner_fmt.as_list(),
),
check_rc=False,
)
@property
def changed(self):
""" This method will check the current user and see if the password has
changed. It will return True if the user does not match the supplied
        credentials and False if it does.
"""
return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc)))
@property
def exists(self):
""" This method will check to see if the supplied username exists for
host specified. If the user exists True is returned, otherwise False
is returned
"""
return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc)))
def log(self, entry):
""" This method will log information to the local syslog facility """
if self.logging:
syslog.openlog('ansible-%s' % self.module._name)
syslog.syslog(syslog.LOG_NOTICE, entry)
def run_command(self, cmd, options, process=None):
""" This method will run the any command specified and return the
returns using the Ansible common module
"""
def _proc(*a):
return a
if process is None:
process = _proc
with self.runner("cmd " + options, output_process=process) as ctx:
res = ctx.run(cmd=cmd, host=self.host, user=self.user, pwd=self.pwd)
self.log('command: %s' % " ".join(ctx.run_info['cmd']))
return res
def update(self):
""" The update method will update the credentials for the user provided
"""
return self.run_command('change_password', 'user host pwd')
def create(self):
""" The create method will create a new user on the host with the
password provided
"""
return self.run_command('register', 'user host pwd')
def delete(self):
""" The delete method will delete the user from the host
"""
return self.run_command('unregister', 'user host')
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True, type='str'),
username=dict(required=True, type='str'),
password=dict(type='str', no_log=True),
state=dict(default='present', choices=['present', 'absent']),
logging=dict(default=False, type='bool', removed_in_version='10.0.0', removed_from_collection='community.general'),
),
required_if=[
('state', 'present', ['password']),
],
supports_check_mode=True,
)
obj = EjabberdUser(module)
rc = None
result = dict(changed=False)
if obj.state == 'absent':
if obj.exists:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.delete()
if rc != 0:
module.fail_json(msg=err, rc=rc)
elif obj.state == 'present':
if not obj.exists:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.create()
elif obj.changed:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.update()
if rc is not None and rc != 0:
module.fail_json(msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,308 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
# Copyright (c) 2017, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: elasticsearch_plugin
short_description: Manage Elasticsearch plugins
description:
- Manages Elasticsearch plugins.
author:
- Mathew Davies (@ThePixelDeveloper)
- Sam Doran (@samdoran)
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- Name of the plugin to install.
required: true
type: str
state:
description:
- Desired state of a plugin.
choices: ["present", "absent"]
default: present
type: str
src:
description:
- Optionally set the source location to retrieve the plugin from. This can be a file://
URL to install from a local file, or a remote URL. If this is not set, the plugin
location is just based on the name.
- The name parameter must match the descriptor in the plugin ZIP specified.
      - This is only used if the state would change, which is checked solely based
        on the name parameter. If, for example, the plugin is already installed,
        changing this has no effect.
      - For ES 1.x, use url.
required: false
type: str
url:
description:
- Set exact URL to download the plugin from (Only works for ES 1.x).
- For ES 2.x and higher, use src.
required: false
type: str
timeout:
description:
- "Timeout setting: 30s, 1m, 1h..."
      - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch 5.0 and later.
default: 1m
type: str
force:
description:
- "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
default: false
type: bool
plugin_bin:
description:
- Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
type: path
plugin_dir:
description:
      - Your configured plugin directory as specified in Elasticsearch.
default: /usr/share/elasticsearch/plugins/
type: path
proxy_host:
description:
      - Proxy host to use during plugin installation.
type: str
proxy_port:
description:
      - Proxy port to use during plugin installation.
type: str
version:
description:
      - Version of the plugin to be installed.
        If the plugin exists with a previous version, it will NOT be updated.
type: str
'''
EXAMPLES = '''
- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
community.general.elasticsearch_plugin:
name: mobz/elasticsearch-head
state: present
- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
community.general.elasticsearch_plugin:
name: mobz/elasticsearch-head
version: 2.0.0
- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
community.general.elasticsearch_plugin:
name: mobz/elasticsearch-head
state: absent
- name: Install a specific plugin in Elasticsearch >= 5.0
community.general.elasticsearch_plugin:
name: analysis-icu
state: present
- name: Install the ingest-geoip plugin with a forced installation
community.general.elasticsearch_plugin:
name: ingest-geoip
state: present
force: true
'''
import os
from ansible.module_utils.basic import AnsibleModule
PACKAGE_STATE_MAP = dict(
present="install",
absent="remove"
)
PLUGIN_BIN_PATHS = tuple([
'/usr/share/elasticsearch/bin/elasticsearch-plugin',
'/usr/share/elasticsearch/bin/plugin'
])
def parse_plugin_repo(string):
elements = string.split("/")
# We first consider the simplest form: pluginname
repo = elements[0]
# We consider the form: username/pluginname
if len(elements) > 1:
repo = elements[1]
# remove elasticsearch- prefix
# remove es- prefix
for string in ("elasticsearch-", "es-"):
if repo.startswith(string):
return repo[len(string):]
return repo
def is_plugin_present(plugin_name, plugin_dir):
return os.path.isdir(os.path.join(plugin_dir, plugin_name))
def parse_error(string):
reason = "ERROR: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
is_old_command = (os.path.basename(plugin_bin) == 'plugin')
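    # ES 1.x/2.x shipped bin/plugin; 5.0 and later renamed it to
    # bin/elasticsearch-plugin with a different set of options.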
# Timeout and version are only valid for plugin, not elasticsearch-plugin
if is_old_command:
if timeout:
cmd_args.append("--timeout %s" % timeout)
if version:
plugin_name = plugin_name + '/' + version
cmd_args[2] = plugin_name
if proxy_host and proxy_port:
cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
# Legacy ES 1.x
if url:
cmd_args.append("--url %s" % url)
if force:
cmd_args.append("--batch")
if src:
cmd_args.append(src)
else:
cmd_args.append(plugin_name)
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
return True, cmd, out, err
def get_plugin_bin(module, plugin_bin=None):
# Use the plugin_bin that was supplied first before trying other options
valid_plugin_bin = None
if plugin_bin and os.path.isfile(plugin_bin):
valid_plugin_bin = plugin_bin
else:
# Add the plugin_bin passed into the module to the top of the list of paths to test,
# testing for that binary name first before falling back to the default paths.
bin_paths = list(PLUGIN_BIN_PATHS)
if plugin_bin and plugin_bin not in bin_paths:
bin_paths.insert(0, plugin_bin)
# Get separate lists of dirs and binary names from the full paths to the
# plugin binaries.
plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
# Check for the binary names in the default system paths as well as the path
# specified in the module arguments.
for bin_file in plugin_bins:
valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
if valid_plugin_bin:
break
if not valid_plugin_bin:
module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
return valid_plugin_bin
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
src=dict(default=None),
url=dict(default=None),
timeout=dict(default="1m"),
force=dict(type='bool', default=False),
plugin_bin=dict(type="path"),
plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
proxy_host=dict(default=None),
proxy_port=dict(default=None),
version=dict(default=None)
),
mutually_exclusive=[("src", "url")],
supports_check_mode=True
)
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
src = module.params["src"]
timeout = module.params["timeout"]
force = module.params["force"]
plugin_bin = module.params["plugin_bin"]
plugin_dir = module.params["plugin_dir"]
proxy_host = module.params["proxy_host"]
proxy_port = module.params["proxy_port"]
version = module.params["version"]
# Search provided path and system paths for valid binary
plugin_bin = get_plugin_bin(module, plugin_bin)
repo = parse_plugin_repo(name)
present = is_plugin_present(repo, plugin_dir)
# skip if the state is correct
if (present and state == "present") or (state == "absent" and not present):
module.exit_json(changed=False, name=name, state=state)
if state == "present":
changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,181 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: emc_vnx_sg_member
short_description: Manage storage group member on EMC VNX
description:
- "This module manages the members of an existing storage group."
extends_documentation_fragment:
- community.general.emc.emc_vnx
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- Name of the Storage group to manage.
required: true
type: str
lunid:
description:
      - LUN ID to be added to or removed from the storage group.
required: true
type: int
state:
description:
- Indicates the desired lunid state.
- V(present) ensures specified lunid is present in the Storage Group.
- V(absent) ensures specified lunid is absent from Storage Group.
default: present
choices: [ "present", "absent"]
type: str
author:
- Luca 'remix_tj' Lorenzetto (@remixtj)
'''
EXAMPLES = '''
- name: Add lun to storage group
community.general.emc_vnx_sg_member:
name: sg01
sp_address: sp1a.fqdn
sp_user: sysadmin
sp_password: sysadmin
lunid: 100
state: present
- name: Remove lun from storage group
community.general.emc_vnx_sg_member:
name: sg01
sp_address: sp1a.fqdn
sp_user: sysadmin
sp_password: sysadmin
lunid: 100
state: absent
'''
RETURN = '''
hluid:
description: LUNID that hosts attached to the storage group will see.
type: int
returned: success
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
LIB_IMP_ERR = None
try:
from storops import VNXSystem
from storops.exception import VNXCredentialError, VNXStorageGroupError, \
VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
HAS_LIB = True
except Exception:
LIB_IMP_ERR = traceback.format_exc()
HAS_LIB = False
def run_module():
module_args = dict(
name=dict(type='str', required=True),
lunid=dict(type='int', required=True),
state=dict(default='present', choices=['present', 'absent']),
)
module_args.update(emc_vnx_argument_spec)
result = dict(
changed=False,
hluid=None
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if not HAS_LIB:
module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
exception=LIB_IMP_ERR)
sp_user = module.params['sp_user']
sp_address = module.params['sp_address']
sp_password = module.params['sp_password']
alu = module.params['lunid']
    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just report the current
    # state with no modifications
    if module.check_mode:
        module.exit_json(**result)
try:
vnx = VNXSystem(sp_address, sp_user, sp_password)
sg = vnx.get_sg(module.params['name'])
if sg.existed:
if module.params['state'] == 'present':
if not sg.has_alu(alu):
try:
result['hluid'] = sg.attach_alu(alu)
result['changed'] = True
except VNXAluAlreadyAttachedError:
result['hluid'] = sg.get_hlu(alu)
except (VNXAttachAluError, VNXStorageGroupError) as e:
module.fail_json(msg='Error attaching {0}: '
'{1} '.format(alu, to_native(e)),
**result)
else:
result['hluid'] = sg.get_hlu(alu)
if module.params['state'] == 'absent' and sg.has_alu(alu):
try:
sg.detach_alu(alu)
result['changed'] = True
except VNXDetachAluNotFoundError:
# being not attached when using absent is OK
pass
except VNXStorageGroupError as e:
module.fail_json(msg='Error detaching alu {0}: '
'{1} '.format(alu, to_native(e)),
**result)
else:
module.fail_json(msg='No such storage group named '
'{0}'.format(module.params['name']),
**result)
except VNXCredentialError as e:
module.fail_json(msg='{0}'.format(to_native(e)), **result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,261 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: etcd3
short_description: Set or delete key value pairs from an etcd3 cluster
requirements:
- etcd3
description:
  - Sets or deletes values in an etcd3 cluster using its v3 API.
  - Needs the Python etcd3 library to work.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
key:
type: str
description:
- the key where the information is stored in the cluster
required: true
value:
type: str
description:
- the information stored
required: true
host:
type: str
description:
- the IP address of the cluster
default: 'localhost'
port:
type: int
description:
- the port number used to connect to the cluster
default: 2379
state:
type: str
description:
- the state of the value for the key.
      - can be V(present) or V(absent).
required: true
choices: [ present, absent ]
user:
type: str
description:
- The etcd user to authenticate with.
password:
type: str
description:
- The password to use for authentication.
- Required if O(user) is defined.
ca_cert:
type: path
description:
- The Certificate Authority to use to verify the etcd host.
- Required if O(client_cert) and O(client_key) are defined.
client_cert:
type: path
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- Required if O(client_key) is defined.
client_key:
type: path
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- Required if O(client_cert) is defined.
timeout:
type: int
description:
- The socket level timeout in seconds.
author:
- Jean-Philippe Evrard (@evrardjp)
- Victor Fauth (@vfauth)
'''
EXAMPLES = """
- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
community.general.etcd3:
key: "foo"
value: "baz3"
host: "localhost"
port: 2379
state: "present"
- name: Authenticate using user/password combination with a timeout of 10 seconds
community.general.etcd3:
key: "foo"
value: "baz3"
state: "present"
user: "someone"
password: "password123"
timeout: 10
- name: Authenticate using TLS certificates
community.general.etcd3:
key: "foo"
value: "baz3"
state: "present"
ca_cert: "/etc/ssl/certs/CA_CERT.pem"
client_cert: "/etc/ssl/certs/cert.crt"
client_key: "/etc/ssl/private/key.pem"
"""
RETURN = '''
key:
description: The key that was queried
returned: always
type: str
old_value:
description: The previous value in the cluster
returned: always
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
try:
import etcd3
HAS_ETCD = True
ETCD_IMP_ERR = None
except ImportError:
ETCD_IMP_ERR = traceback.format_exc()
HAS_ETCD = False
def run_module():
# define the available arguments/parameters that a user can pass to
# the module
module_args = dict(
key=dict(type='str', required=True, no_log=False),
value=dict(type='str', required=True),
host=dict(type='str', default='localhost'),
port=dict(type='int', default=2379),
state=dict(type='str', required=True, choices=['present', 'absent']),
user=dict(type='str'),
password=dict(type='str', no_log=True),
ca_cert=dict(type='path'),
client_cert=dict(type='path'),
client_key=dict(type='path'),
timeout=dict(type='int'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True,
required_together=[['client_cert', 'client_key'], ['user', 'password']],
)
# It is possible to set `ca_cert` to verify the server identity without
# setting `client_cert` or `client_key` to authenticate the client
# so required_together is enough
# Due to `required_together=[['client_cert', 'client_key']]`, checking the presence
# of either `client_cert` or `client_key` is enough
if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
result['key'] = module.params.get('key')
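    # The etcd3 client takes cert_cert/cert_key keyword arguments, so map the
    # module's client_cert/client_key options onto those names.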
module.params['cert_cert'] = module.params.pop('client_cert')
module.params['cert_key'] = module.params.pop('client_key')
if not HAS_ETCD:
module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
'timeout', 'user', 'password']
# TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
# the minimum supported version
# client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
client_params = dict()
for key, value in module.params.items():
if key in allowed_keys:
client_params[key] = value
try:
etcd = etcd3.client(**client_params)
except Exception as exp:
module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
exception=traceback.format_exc())
try:
cluster_value = etcd.get(module.params['key'])
except Exception as exp:
module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
exception=traceback.format_exc())
# Make the cluster_value[0] a string for string comparisons
result['old_value'] = to_native(cluster_value[0])
if module.params['state'] == 'absent':
if cluster_value[0] is not None:
if module.check_mode:
result['changed'] = True
else:
try:
etcd.delete(module.params['key'])
except Exception as exp:
module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
exception=traceback.format_exc())
else:
result['changed'] = True
elif module.params['state'] == 'present':
if result['old_value'] != module.params['value']:
if module.check_mode:
result['changed'] = True
else:
try:
etcd.put(module.params['key'], module.params['value'])
except Exception as exp:
module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
exception=traceback.format_exc())
else:
result['changed'] = True
else:
module.fail_json(msg="State not recognized")
    # Success: report the final state back to Ansible.
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,80 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program C(facter) on the remote system
description:
- Runs the C(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
JSON data that can be useful for inventory purposes.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
arguments:
description:
- Specifies arguments for facter.
type: list
elements: str
requirements:
- facter
- ruby-json
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Example command-line invocation
# ansible www.example.net -m facter
- name: Execute facter with no arguments
community.general.facter:
- name: Execute facter with arguments
community.general.facter:
arguments:
- -p
- system_uptime
- timezone
- is_virtual
'''
import json
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
arguments=dict(required=False, type='list', elements='str')
)
)
facter_path = module.get_bin_path(
'facter',
opt_dirs=['/opt/puppetlabs/bin'])
cmd = [facter_path, "--json"]
if module.params['arguments']:
cmd += module.params['arguments']
rc, out, err = module.run_command(cmd, check_rc=True)
module.exit_json(**json.loads(out))
if __name__ == '__main__':
main()

View File

@ -0,0 +1,90 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2023, Alexei Znamensky
# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: facter_facts
short_description: Runs the discovery program C(facter) on the remote system and return Ansible facts
version_added: 8.0.0
description:
- Runs the C(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible facts from the
JSON data that can be useful for inventory purposes.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.facts
- community.general.attributes.facts_module
options:
arguments:
description:
- Specifies arguments for facter.
type: list
elements: str
requirements:
- facter
- ruby-json
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
- name: Execute facter with no arguments
community.general.facter_facts:
- name: Execute facter with arguments
community.general.facter_facts:
arguments:
- -p
- system_uptime
- timezone
- is_virtual
'''
RETURN = r'''
ansible_facts:
description: Dictionary with one key C(facter).
returned: always
type: dict
contains:
facter:
description: Dictionary containing facts discovered in the remote system.
returned: always
type: dict
'''
import json
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
arguments=dict(type='list', elements='str'),
),
supports_check_mode=True,
)
facter_path = module.get_bin_path(
'facter',
opt_dirs=['/opt/puppetlabs/bin'])
cmd = [facter_path, "--json"]
if module.params['arguments']:
cmd += module.params['arguments']
rc, out, err = module.run_command(cmd, check_rc=True)
module.exit_json(ansible_facts=dict(facter=json.loads(out)))
if __name__ == '__main__':
main()
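# Unlike the plain facter module above, the data is nested under
# ansible_facts.facter, so a play can reference it as, e.g. (illustrative):
#   {{ ansible_facts.facter.timezone }}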

View File

@ -0,0 +1,492 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, quidame <quidame@poivron.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: filesize
short_description: Create a file with a given size, or resize it if it exists
description:
- This module is a simple wrapper around C(dd) to create, extend or truncate
a file, given its size. It can be used to manage swap files (that require
contiguous blocks) or alternatively, huge sparse files.
author:
- quidame (@quidame)
version_added: "3.0.0"
attributes:
check_mode:
support: full
diff_mode:
support: full
options:
path:
description:
- Path of the regular file to create or resize.
type: path
required: true
size:
description:
- Requested size of the file.
- The value is a number (either C(int) or C(float)) optionally followed
by a multiplicative suffix, that can be one of V(B) (bytes), V(KB) or
V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB),
and so on for V(T), V(P), V(E), V(Z) and V(Y); or alternatively one of
V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB);
V(G), V(g) or V(GiB) (= 1024MiB); and so on.
- If the multiplicative suffix is not provided, the value is treated as
an integer number of blocks of O(blocksize) bytes each (float values
are rounded to the closest integer).
- When the O(size) value is equal to the current file size, does nothing.
- When the O(size) value is bigger than the current file size, bytes from
O(source) (if O(sparse) is not V(false)) are appended to the file
without truncating it, in other words, without modifying the existing
bytes of the file.
- When the O(size) value is smaller than the current file size, it is
truncated to the requested value without modifying bytes before this
value.
- That means that a file of any arbitrary size can be grown to any other
arbitrary size, and then resized down to its initial size without
modifying its initial content.
type: raw
required: true
blocksize:
description:
- Size of blocks, in bytes if not followed by a multiplicative suffix.
- The numeric value (before the unit) B(MUST) be an integer (or a C(float)
if it equals an integer).
- If not set, the size of blocks is guessed from the OS and commonly
results in V(512) or V(4096) bytes, that is used internally by the
module or when O(size) has no unit.
type: raw
source:
description:
- Device or file that provides input data to provision the file.
- This parameter is ignored when O(sparse=true).
type: path
default: /dev/zero
force:
description:
- Whether or not to overwrite the file if it exists, in other words, to
truncate it from 0. When V(true), the module is not idempotent, that
means it always reports C(changed=true).
- O(force=true) and O(sparse=true) are mutually exclusive.
type: bool
default: false
sparse:
description:
- Whether or not the file to create should be a sparse file.
- This option is effective only on newly created files, or when growing a
file, only for the bytes to append.
- This option is not supported on OSes or filesystems not supporting sparse files.
- O(force=true) and O(sparse=true) are mutually exclusive.
type: bool
default: false
unsafe_writes:
description:
- This option is silently ignored. This module always modifies file
size in-place.
requirements:
- dd (Data Duplicator) in PATH
extends_documentation_fragment:
- ansible.builtin.files
- community.general.attributes
seealso:
- name: dd(1) manpage for Linux
description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils).
link: https://man7.org/linux/man-pages/man1/dd.1.html
- name: dd(1) manpage for IBM AIX
description: Manual page of the IBM AIX's dd implementation.
link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html
- name: dd(1) manpage for Mac OSX
description: Manual page of the Mac OSX's dd implementation.
link: https://www.unix.com/man-page/osx/1/dd/
- name: dd(1M) manpage for Solaris
description: Manual page of the Oracle Solaris's dd implementation.
link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html
- name: dd(1) manpage for FreeBSD
description: Manual page of the FreeBSD's dd implementation.
link: https://www.freebsd.org/cgi/man.cgi?dd(1)
- name: dd(1) manpage for OpenBSD
description: Manual page of the OpenBSD's dd implementation.
link: https://man.openbsd.org/dd
- name: dd(1) manpage for NetBSD
description: Manual page of the NetBSD's dd implementation.
link: https://man.netbsd.org/dd.1
- name: busybox(1) manpage for Linux
description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation.
link: https://www.unix.com/man-page/linux/1/busybox
'''
EXAMPLES = r'''
- name: Create a file of 1G filled with null bytes
community.general.filesize:
path: /var/bigfile
size: 1G
- name: Extend the file to 2G (2*1024^3)
community.general.filesize:
path: /var/bigfile
size: 2G
- name: Reduce the file to 2GB (2*1000^3)
community.general.filesize:
path: /var/bigfile
size: 2GB
- name: Fill a file with random bytes for backing a LUKS device
community.general.filesize:
path: ~/diskimage.luks
size: 512.0 MiB
source: /dev/urandom
- name: Take a backup of MBR boot code into a file, overwriting it if it exists
community.general.filesize:
path: /media/sdb1/mbr.bin
size: 440B
source: /dev/sda
force: true
- name: Create/resize a sparse file of/to 8TB
community.general.filesize:
path: /var/local/sparsefile
size: 8TB
sparse: true
- name: Create a file with specific size and attributes, to be used as swap space
community.general.filesize:
path: /var/swapfile
size: 2G
blocksize: 512B
mode: u=rw,go=
owner: root
group: root
'''
RETURN = r'''
cmd:
description: Command executed to create or resize the file.
type: str
returned: when changed or failed
sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024
filesize:
description: Dictionary of sizes related to the file.
type: dict
returned: always
contains:
blocks:
description: Number of blocks in the file.
type: int
sample: 500
blocksize:
description: Size of the blocks in bytes.
type: int
sample: 1024
bytes:
description: Size of the file, in bytes, as the product of RV(filesize.blocks) and RV(filesize.blocksize).
type: int
sample: 512000
iec:
description: Size of the file, in human-readable format, following IEC standard.
type: str
sample: 500.0 KiB
si:
description: Size of the file, in human-readable format, following SI standard.
type: str
sample: 512.0 kB
size_diff:
description: Difference (positive or negative) between old size and new size, in bytes.
type: int
sample: -1234567890
returned: always
path:
description: Realpath of the file if it is a symlink, otherwise the same as the module's O(path) parameter.
type: str
sample: /var/swap0
returned: always
'''
import re
import os
import math
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
# These are the multiplicative suffixes understood (or returned) by dd and
# others (ls, df, lvresize, lsblk...).
SIZE_UNITS = dict(
B=1,
kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1,
MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2,
GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3,
TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4,
PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5,
EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6,
ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7,
YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8,
)
def bytes_to_human(size, iec=False):
"""Return human-readable size (with SI or IEC suffix) from bytes. This is
only to populate the returned result of the module, not to handle the
file itself (we only rely on bytes for that).
"""
unit = 'B'
for (u, v) in SIZE_UNITS.items():
if size < v:
continue
if iec:
if 'i' not in u or size / v >= 1024:
continue
else:
if v % 5 or size / v >= 1000:
continue
unit = u
hsize = round(size / SIZE_UNITS[unit], 2)
if unit == 'B':
hsize = int(hsize)
unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit)
if unit == 'KB':
unit = 'kB'
return '%s %s' % (str(hsize), unit)
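# A quick illustration of the two branches above (values match the RETURN
# samples of this module; powers of 1000 for SI, powers of 1024 for IEC):
#   bytes_to_human(512000)        -> '512.0 kB'
#   bytes_to_human(512000, True)  -> '500.0 KiB'
#   bytes_to_human(10)            -> '10 B'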
def smart_blocksize(size, unit, product, bsize):
"""Ensure the total size can be written as blocks*blocksize, with blocks
and blocksize being integers.
"""
if not product % bsize:
return bsize
# Basically, for a file of 8kB (=8000B), system's block size of 4096 bytes
# is not usable. The smallest integer number of kB to work with 512B blocks
# is 64; the next ones are 128, 192, 256, and so on.
unit_size = SIZE_UNITS[unit]
if size == int(size):
if unit_size > SIZE_UNITS['MiB']:
if unit_size % 5:
return SIZE_UNITS['MiB']
return SIZE_UNITS['MB']
return unit_size
if unit == 'B':
raise AssertionError("byte is the smallest unit and requires an integer value")
if 0 < product < bsize:
return product
for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2):
if not product % bsz:
return bsz
return 1
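# Worked example for the 8kB case mentioned above (hedged illustration):
#   smart_blocksize(8, 'kB', 8000, 4096) -> 1000
# 8000 is not a multiple of 4096, but the size value (8) is an integer, so
# the unit size itself (1000 bytes) is used as the block size, giving
# 8 blocks of 1000 bytes.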
def split_size_unit(string, isint=False):
"""Split a string between the size value (int or float) and the unit.
Support optional space(s) between the numeric value and the unit.
"""
unit = re.sub(r'(\d|\.)', r'', string).strip()
value = float(re.sub(r'%s' % unit, r'', string).strip())
if isint and unit in ('B', ''):
if int(value) != value:
raise AssertionError("invalid blocksize value: bytes require an integer value")
if not unit:
unit = None
product = int(round(value))
else:
if unit not in SIZE_UNITS.keys():
raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." %
(unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get))))
product = int(round(value * SIZE_UNITS[unit]))
return value, unit, product
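# Illustrative parses (hedged examples):
#   split_size_unit('512.0 MiB') -> (512.0, 'MiB', 536870912)   # 512 * 1024**2
#   split_size_unit('2G')        -> (2.0, 'G', 2147483648)      # 2 * 1024**3
#   split_size_unit('1024')      -> (1024.0, None, 1024)        # unitless: blocks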
def size_string(value):
"""Convert a raw value to a string, but only if it is an integer, a float
or a string itself.
"""
if not isinstance(value, (int, float, str)):
raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value))
return str(value)
def size_spec(args):
"""Return a dictionary with size specifications, especially the size in
bytes (after rounding it to an integer number of blocks).
"""
blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2]
if blocksize_in_bytes == 0:
raise AssertionError("block size cannot be equal to zero")
size_value, size_unit, size_result = split_size_unit(args['size'])
if not size_unit:
blocks = int(math.ceil(size_value))
else:
blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes)
blocks = int(math.ceil(size_result / blocksize_in_bytes))
args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes)
args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes,
iec=bytes_to_human(round_bytes, True),
si=bytes_to_human(round_bytes))
return args['size_spec']
def current_size(args):
"""Return the size of the file at the given location if it exists, or None."""
path = args['path']
if os.path.exists(path):
if not os.path.isfile(path):
raise AssertionError("%s exists but is not a regular file" % path)
args['file_size'] = os.stat(path).st_size
else:
args['file_size'] = None
return args['file_size']
def complete_dd_cmdline(args, dd_cmd):
"""Compute dd options to grow or truncate a file."""
if args['file_size'] == args['size_spec']['bytes'] and not args['force']:
# Nothing to do.
return list()
bs = args['size_spec']['blocksize']
# For sparse files (create, truncate, grow): write count=0 block.
if args['sparse']:
seek = args['size_spec']['blocks']
elif args['force'] or not os.path.exists(args['path']): # Create file
seek = 0
elif args['size_diff'] < 0: # Truncate file
seek = args['size_spec']['blocks']
elif args['size_diff'] % bs: # Grow file
seek = int(args['file_size'] / bs) + 1
else:
seek = int(args['file_size'] / bs)
count = args['size_spec']['blocks'] - seek
dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)]
return dd_cmd
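# Worked example matching the RETURN sample of this module: growing a 3 GiB
# file to 4 GiB with a 1 MiB blocksize appends 1024 blocks after the existing
# 3072, so the command ends with: bs=1048576 seek=3072 count=1024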
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True),
size=dict(type='raw', required=True),
blocksize=dict(type='raw'),
source=dict(type='path', default='/dev/zero'),
sparse=dict(type='bool', default=False),
force=dict(type='bool', default=False),
),
supports_check_mode=True,
add_file_common_args=True,
)
args = dict(**module.params)
diff = dict(before=dict(), after=dict())
if args['sparse'] and args['force']:
module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true')
if not os.path.exists(os.path.dirname(args['path'])):
module.fail_json(msg='parent directory of the file must exist prior to run this module')
if not args['blocksize']:
args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize)
try:
args['size'] = size_string(args['size'])
args['blocksize'] = size_string(args['blocksize'])
initial_filesize = current_size(args)
size_descriptors = size_spec(args)
except AssertionError as err:
module.fail_json(msg=to_native(err))
expected_filesize = size_descriptors['bytes']
if initial_filesize:
args['size_diff'] = expected_filesize - initial_filesize
diff['after']['size'] = expected_filesize
diff['before']['size'] = initial_filesize
result = dict(
changed=args['force'],
size_diff=args['size_diff'],
path=args['path'],
filesize=size_descriptors)
dd_bin = module.get_bin_path('dd', True)
dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']]
if expected_filesize != initial_filesize or args['force']:
result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd))
if module.check_mode:
result['changed'] = True
else:
result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd)
diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args)
if initial_filesize:
result['size_diff'] = result_filesize - initial_filesize
if not args['force']:
result['changed'] = result_filesize != initial_filesize
if result['rc']:
msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % (
args['path'], args['size'], args['source'])
module.fail_json(msg=msg, **result)
if result_filesize != expected_filesize:
msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % (
args['path'], args['size'], args['source'], result_filesize)
module.fail_json(msg=msg, **result)
# dd follows symlinks, and so does this module, while file module doesn't.
# If we call it, this is to manage file's mode, owner and so on, not the
# symlink's ones.
file_params = dict(**module.params)
if os.path.islink(args['path']):
file_params['path'] = result['path'] = os.path.realpath(args['path'])
if args['file_size'] is not None:
file_args = module.load_file_common_arguments(file_params)
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
result['diff'] = diff
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,738 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, quidame <quidame@poivron.org>
# Copyright (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
author:
- Alexander Bulimov (@abulimov)
- quidame (@quidame)
module: filesystem
short_description: Makes a filesystem
description:
- This module creates a filesystem.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- If O(state=present), the filesystem is created if it doesn't already
exist, that is the default behaviour if O(state) is omitted.
- If O(state=absent), filesystem signatures on O(dev) are wiped if it
contains a filesystem (as known by C(blkid)).
- When O(state=absent), all other options but O(dev) are ignored, and the
module does not fail if the device O(dev) doesn't actually exist.
type: str
choices: [ present, absent ]
default: present
version_added: 1.3.0
fstype:
choices: [ bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ]
description:
- Filesystem type to be created. This option is required with
O(state=present) (or if O(state) is omitted).
- ufs support has been added in community.general 3.4.0.
- bcachefs support has been added in community.general 8.6.0.
type: str
aliases: [type]
dev:
description:
- Target path to block device (Linux) or character device (FreeBSD) or
regular file (both).
- When setting Linux-specific filesystem types on FreeBSD, this module
only works when applying to regular files, aka disk images.
- Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support
a regular file as their target O(dev).
- Support for character devices on FreeBSD has been added in community.general 3.4.0.
type: path
required: true
aliases: [device]
force:
description:
- If V(true), allows to create a new filesystem on a device that already holds one.
type: bool
default: false
resizefs:
description:
- If V(true), if the block device and filesystem size differ, grow the filesystem into the space.
- Supported for C(bcachefs), C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems.
Attempts to resize other filesystem types will fail.
- XFS will only grow if mounted. Currently, the module is based on commands
from C(util-linux) package to perform operations, so resizing of XFS is
not supported on FreeBSD systems.
- vFAT will likely fail if C(fatresize < 1.04).
- Mutually exclusive with O(uuid).
type: bool
default: false
opts:
description:
- List of options to be passed to C(mkfs) command.
type: str
uuid:
description:
- Set filesystem's UUID to the given value.
- The UUID options specified in O(opts) take precedence over this value.
- See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values.
- For O(fstype=lvm) the value is ignored, it resets the PV UUID if set.
- Supported for O(fstype) being one of C(bcachefs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs).
- This is B(not idempotent). Specifying this option will always result in a change.
- Mutually exclusive with O(resizefs).
type: str
version_added: 7.1.0
requirements:
- Uses specific tools related to the O(fstype) for creating or resizing a
filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on).
- Uses generic tools mostly related to the Operating System (Linux or
FreeBSD) or available on both, as C(blkid).
- On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required.
notes:
- Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid)
is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also
unable to detect a filesystem), this filesystem is overwritten even if
O(force) is V(false).
- On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide
a C(blkid) command that is compatible with this module. However, these
packages conflict with each other, and only the C(util-linux) package
provides the command required to not fail when O(state=absent).
seealso:
- module: community.general.filesize
- module: ansible.posix.mount
- name: xfs_admin(8) manpage for Linux
description: Manual page of the GNU/Linux's xfs_admin implementation
link: https://man7.org/linux/man-pages/man8/xfs_admin.8.html
- name: tune2fs(8) manpage for Linux
description: Manual page of the GNU/Linux's tune2fs implementation
link: https://man7.org/linux/man-pages/man8/tune2fs.8.html
'''
EXAMPLES = '''
- name: Create a ext2 filesystem on /dev/sdb1
community.general.filesystem:
fstype: ext2
dev: /dev/sdb1
- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
community.general.filesystem:
fstype: ext4
dev: /dev/sdb1
opts: -cc
- name: Blank filesystem signature on /dev/sdb1
community.general.filesystem:
dev: /dev/sdb1
state: absent
- name: Create a filesystem on top of a regular file
community.general.filesystem:
dev: /path/to/disk.img
fstype: vfat
- name: Reset an xfs filesystem UUID on /dev/sdb1
community.general.filesystem:
fstype: xfs
dev: /dev/sdb1
uuid: generate
- name: Reset an ext4 filesystem UUID on /dev/sdb1
community.general.filesystem:
fstype: ext4
dev: /dev/sdb1
uuid: random
- name: Reset an LVM filesystem (PV) UUID on /dev/sdc
community.general.filesystem:
fstype: lvm
dev: /dev/sdc
uuid: random
'''
import os
import platform
import re
import stat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
class Device(object):
def __init__(self, module, path):
self.module = module
self.path = path
def size(self):
""" Return size in bytes of device. Returns int """
statinfo = os.stat(self.path)
if stat.S_ISBLK(statinfo.st_mode):
blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
devsize_in_bytes = int(out)
elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD':
diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True)
dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True)
devsize_in_bytes = int(out.split()[2])
elif os.path.isfile(self.path):
devsize_in_bytes = os.path.getsize(self.path)
else:
self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
return devsize_in_bytes
def get_mountpoint(self):
"""Return (first) mountpoint of device. Returns None when not mounted."""
cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
# find mountpoint
rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
"TARGET", "--source", self.path], check_rc=False)
if rc != 0:
mountpoint = None
else:
mountpoint = mountpoint.split('\n')[0]
return mountpoint
def __str__(self):
return self.path
class Filesystem(object):
MKFS = None
MKFS_FORCE_FLAGS = []
MKFS_SET_UUID_OPTIONS = None
MKFS_SET_UUID_EXTRA_OPTIONS = []
INFO = None
GROW = None
GROW_MAX_SPACE_FLAGS = []
GROW_MOUNTPOINT_ONLY = False
CHANGE_UUID = None
CHANGE_UUID_OPTION = None
CHANGE_UUID_OPTION_HAS_ARG = True
LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
def __init__(self, module):
self.module = module
@property
def fstype(self):
return type(self).__name__
def get_fs_size(self, dev):
"""Return size in bytes of filesystem on device (integer).
Should query the info with a per-fstype command that can access the
device whenever it is mounted or not, and parse the command output.
Parser must ensure to return an integer, or raise a ValueError.
"""
raise NotImplementedError()
def create(self, opts, dev, uuid=None):
if self.module.check_mode:
return
if uuid and self.MKFS_SET_UUID_OPTIONS:
if not (set(self.MKFS_SET_UUID_OPTIONS) & set(opts)):
opts += [self.MKFS_SET_UUID_OPTIONS[0], uuid] + self.MKFS_SET_UUID_EXTRA_OPTIONS
mkfs = self.module.get_bin_path(self.MKFS, required=True)
cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)]
self.module.run_command(cmd, check_rc=True)
if uuid and self.CHANGE_UUID and self.MKFS_SET_UUID_OPTIONS is None:
self.change_uuid(new_uuid=uuid, dev=dev)
def wipefs(self, dev):
if self.module.check_mode:
return
# wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above)
# that is ported to FreeBSD. The use of dd as a portable fallback is
# not doable here if it needs get_mountpoint() (to prevent corruption of
# a mounted filesystem), since 'findmnt' is not available on FreeBSD,
# even in util-linux port for this OS.
wipefs = self.module.get_bin_path('wipefs', required=True)
cmd = [wipefs, "--all", str(dev)]
self.module.run_command(cmd, check_rc=True)
def grow_cmd(self, target):
"""Build and return the resizefs commandline as list."""
cmdline = [self.module.get_bin_path(self.GROW, required=True)]
cmdline += self.GROW_MAX_SPACE_FLAGS + [target]
return cmdline
def grow(self, dev):
"""Get dev and fs size and compare. Returns stdout of used command."""
devsize_in_bytes = dev.size()
try:
fssize_in_bytes = self.get_fs_size(dev)
except NotImplementedError:
self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype)
except ValueError as err:
self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err)))
self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev))
if not fssize_in_bytes < devsize_in_bytes:
self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
elif self.module.check_mode:
self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev))
if self.GROW_MOUNTPOINT_ONLY:
mountpoint = dev.get_mountpoint()
if not mountpoint:
self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
grow_target = mountpoint
else:
grow_target = str(dev)
dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True)
return out
def change_uuid_cmd(self, new_uuid, target):
"""Build and return the UUID change command line as list."""
cmdline = [self.module.get_bin_path(self.CHANGE_UUID, required=True)]
if self.CHANGE_UUID_OPTION_HAS_ARG:
cmdline += [self.CHANGE_UUID_OPTION, new_uuid, target]
else:
cmdline += [self.CHANGE_UUID_OPTION, target]
return cmdline
def change_uuid(self, new_uuid, dev):
"""Change filesystem UUID. Returns stdout of used command"""
if self.module.check_mode:
self.module.exit_json(changed=True, msg='Changing %s filesystem UUID on device %s' % (self.fstype, dev))
dummy, out, dummy = self.module.run_command(self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True)
return out
class Ext(Filesystem):
MKFS_FORCE_FLAGS = ['-F']
MKFS_SET_UUID_OPTIONS = ['-U']
INFO = 'tune2fs'
GROW = 'resize2fs'
CHANGE_UUID = 'tune2fs'
CHANGE_UUID_OPTION = "-U"
def get_fs_size(self, dev):
"""Get Block count and Block size and return their product."""
cmd = self.module.get_bin_path(self.INFO, required=True)
dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
block_count = block_size = None
for line in out.splitlines():
if 'Block count:' in line:
block_count = int(line.split(':')[1].strip())
elif 'Block size:' in line:
block_size = int(line.split(':')[1].strip())
if None not in (block_size, block_count):
break
else:
raise ValueError(repr(out))
return block_size * block_count
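# Illustrative tune2fs -l output lines consumed above (hedged example):
#   Block count:              131072
#   Block size:               4096
# which yields 131072 * 4096 = 536870912 bytes.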
class Ext2(Ext):
MKFS = 'mkfs.ext2'
class Ext3(Ext):
MKFS = 'mkfs.ext3'
class Ext4(Ext):
MKFS = 'mkfs.ext4'
class XFS(Filesystem):
MKFS = 'mkfs.xfs'
MKFS_FORCE_FLAGS = ['-f']
INFO = 'xfs_info'
GROW = 'xfs_growfs'
GROW_MOUNTPOINT_ONLY = True
CHANGE_UUID = "xfs_admin"
CHANGE_UUID_OPTION = "-U"
def get_fs_size(self, dev):
"""Get bsize and blocks and return their product."""
cmdline = [self.module.get_bin_path(self.INFO, required=True)]
# Depending on the versions, xfs_info is able to get info from the
# device, whenever it is mounted or not, or only if unmounted, or
# only if mounted, or not at all. For any version until now, it is
# able to query info from the mountpoint. So try it first, and use
# device as the last resort: it may or may not work.
mountpoint = dev.get_mountpoint()
if mountpoint:
cmdline += [mountpoint]
else:
cmdline += [str(dev)]
dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV)
block_size = block_count = None
for line in out.splitlines():
col = line.split('=')
if col[0].strip() == 'data':
if col[1].strip() == 'bsize':
block_size = int(col[2].split()[0])
if col[2].split()[1] == 'blocks':
block_count = int(col[3].split(',')[0])
if None not in (block_size, block_count):
break
else:
raise ValueError(repr(out))
return block_size * block_count
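# Illustrative xfs_info 'data' line parsed above (hedged example):
#   data     =                       bsize=4096   blocks=131072, imaxpct=25
# splitting on '=' exposes bsize (4096) and blocks (131072),
# giving 4096 * 131072 = 536870912 bytes.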
class Reiserfs(Filesystem):
MKFS = 'mkfs.reiserfs'
MKFS_FORCE_FLAGS = ['-q']
class Bcachefs(Filesystem):
MKFS = 'mkfs.bcachefs'
MKFS_FORCE_FLAGS = ['--force']
MKFS_SET_UUID_OPTIONS = ['-U', '--uuid']
INFO = 'bcachefs'
GROW = 'bcachefs'
GROW_MAX_SPACE_FLAGS = ['device', 'resize']
def get_fs_size(self, dev):
"""Return size in bytes of filesystem on device (integer)."""
dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO),
'show-super', str(dev)], check_rc=True)
for line in stdout.splitlines():
if "Size: " in line:
parts = line.split()
unit = parts[2]
base = None
exp = None
units_2 = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]
units_10 = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
try:
exp = units_2.index(unit)
base = 1024
except ValueError:
exp = units_10.index(unit)
base = 1000
if exp == 0:
value = int(parts[1])
else:
value = float(parts[1])
if base is not None and exp is not None:
return int(value * pow(base, exp))
raise ValueError(repr(stdout))
class Btrfs(Filesystem):
MKFS = 'mkfs.btrfs'
INFO = 'btrfs'
GROW = 'btrfs'
GROW_MAX_SPACE_FLAGS = ['filesystem', 'resize', 'max']
GROW_MOUNTPOINT_ONLY = True
def __init__(self, module):
super(Btrfs, self).__init__(module)
mkfs = self.module.get_bin_path(self.MKFS, required=True)
dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True)
match = re.search(r" v([0-9.]+)", stdout)
if not match:
# v0.20-rc1 use stderr
match = re.search(r" v([0-9.]+)", stderr)
if match:
# v0.20-rc1 doesn't have --force parameter added in following version v3.12
if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
self.MKFS_FORCE_FLAGS = ['-f']
else:
# assume version is greater or equal to 3.12
self.MKFS_FORCE_FLAGS = ['-f']
self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
def get_fs_size(self, dev):
"""Return size in bytes of filesystem on device (integer)."""
mountpoint = dev.get_mountpoint()
if not mountpoint:
self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO),
'filesystem', 'usage', '-b', mountpoint], check_rc=True)
for line in stdout.splitlines():
if "Device size" in line:
return int(line.split()[-1])
raise ValueError(repr(stdout))
class Ocfs2(Filesystem):
MKFS = 'mkfs.ocfs2'
MKFS_FORCE_FLAGS = ['-Fx']
class F2fs(Filesystem):
MKFS = 'mkfs.f2fs'
INFO = 'dump.f2fs'
GROW = 'resize.f2fs'
def __init__(self, module):
super(F2fs, self).__init__(module)
mkfs = self.module.get_bin_path(self.MKFS, required=True)
dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV)
# Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
# mkfs.f2fs displays version since v1.2.0
match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
if match is not None:
# Since 1.9.0, mkfs.f2fs checks for an existing filesystem before creating
# a new one; before that version the -f switch wasn't used
if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
self.MKFS_FORCE_FLAGS = ['-f']
def get_fs_size(self, dev):
"""Get sector size and total FS sectors and return their product."""
cmd = self.module.get_bin_path(self.INFO, required=True)
dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
sector_size = sector_count = None
for line in out.splitlines():
if 'Info: sector size = ' in line:
# expected: 'Info: sector size = 512'
sector_size = int(line.split()[4])
elif 'Info: total FS sectors = ' in line:
# expected: 'Info: total FS sectors = 102400 (50 MB)'
sector_count = int(line.split()[5])
if None not in (sector_size, sector_count):
break
else:
raise ValueError(repr(out))
return sector_size * sector_count
class VFAT(Filesystem):
INFO = 'fatresize'
GROW = 'fatresize'
GROW_MAX_SPACE_FLAGS = ['-s', 'max']
def __init__(self, module):
super(VFAT, self).__init__(module)
if platform.system() == 'FreeBSD':
self.MKFS = 'newfs_msdos'
else:
self.MKFS = 'mkfs.vfat'
def get_fs_size(self, dev):
"""Get and return size of filesystem, in bytes."""
cmd = self.module.get_bin_path(self.INFO, required=True)
dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
fssize = None
for line in out.splitlines()[1:]:
parts = line.split(':', 1)
if len(parts) < 2:
continue
param, value = parts
if param.strip() in ('Size', 'Cur size'):
fssize = int(value.strip())
break
else:
raise ValueError(repr(out))
return fssize
class LVM(Filesystem):
MKFS = 'pvcreate'
MKFS_FORCE_FLAGS = ['-f']
MKFS_SET_UUID_OPTIONS = ['-u', '--uuid']
MKFS_SET_UUID_EXTRA_OPTIONS = ['--norestorefile']
INFO = 'pvs'
GROW = 'pvresize'
CHANGE_UUID = 'pvchange'
CHANGE_UUID_OPTION = '-u'
CHANGE_UUID_OPTION_HAS_ARG = False
def get_fs_size(self, dev):
"""Get and return PV size, in bytes."""
cmd = self.module.get_bin_path(self.INFO, required=True)
dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
pv_size = int(size)
return pv_size
class Swap(Filesystem):
MKFS = 'mkswap'
MKFS_FORCE_FLAGS = ['-f']
class UFS(Filesystem):
MKFS = 'newfs'
INFO = 'dumpfs'
GROW = 'growfs'
GROW_MAX_SPACE_FLAGS = ['-y']
def get_fs_size(self, dev):
"""Get providersize and fragment size and return their product."""
cmd = self.module.get_bin_path(self.INFO, required=True)
dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
fragmentsize = providersize = None
for line in out.splitlines():
if line.startswith('fsize'):
fragmentsize = int(line.split()[1])
elif 'providersize' in line:
providersize = int(line.split()[-1])
if None not in (fragmentsize, providersize):
break
else:
raise ValueError(repr(out))
return fragmentsize * providersize
FILESYSTEMS = {
'bcachefs': Bcachefs,
'ext2': Ext2,
'ext3': Ext3,
'ext4': Ext4,
'ext4dev': Ext4,
'f2fs': F2fs,
'reiserfs': Reiserfs,
'xfs': XFS,
'btrfs': Btrfs,
'vfat': VFAT,
'ocfs2': Ocfs2,
'LVM2_member': LVM,
'swap': Swap,
'ufs': UFS,
}
def main():
friendly_names = {
'lvm': 'LVM2_member',
}
fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
# There is no "single command" to manipulate filesystems, so we map them all out and their options
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
dev=dict(type='path', required=True, aliases=['device']),
opts=dict(type='str'),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
uuid=dict(type='str', required=False),
),
required_if=[
('state', 'present', ['fstype'])
],
mutually_exclusive=[
('resizefs', 'uuid'),
],
supports_check_mode=True,
)
state = module.params['state']
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.params['force']
resizefs = module.params['resizefs']
uuid = module.params['uuid']
mkfs_opts = []
if opts is not None:
mkfs_opts = opts.split()
changed = False
if not os.path.exists(dev):
msg = "Device %s not found." % dev
if state == "present":
module.fail_json(msg=msg)
else:
module.exit_json(msg=msg)
dev = Device(module, dev)
# In case blkid/fstyp isn't able to identify an existing filesystem, the
# device is considered empty, and any existing filesystem would then be
# overwritten even if force isn't enabled.
cmd = module.get_bin_path('blkid', required=True)
rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)])
fs = raw_fs.strip()
if not fs and platform.system() == 'FreeBSD':
cmd = module.get_bin_path('fstyp', required=True)
rc, raw_fs, err = module.run_command([cmd, str(dev)])
fs = raw_fs.strip()
if state == "present":
if fstype in friendly_names:
fstype = friendly_names[fstype]
try:
klass = FILESYSTEMS[fstype]
except KeyError:
module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
filesystem = klass(module)
if uuid and not (filesystem.CHANGE_UUID or filesystem.MKFS_SET_UUID_OPTIONS):
module.fail_json(changed=False, msg="module does not support UUID option for this filesystem (%s) yet." % fstype)
same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
if same_fs and not resizefs and not uuid and not force:
module.exit_json(changed=False)
elif same_fs:
if resizefs:
if not filesystem.GROW:
module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
out = filesystem.grow(dev)
module.exit_json(changed=True, msg=out)
elif uuid:
out = filesystem.change_uuid(new_uuid=uuid, dev=dev)
module.exit_json(changed=True, msg=out)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err)
# create fs
filesystem.create(opts=mkfs_opts, dev=dev, uuid=uuid)
changed = True
elif fs:
# wipe fs signatures
filesystem = Filesystem(module)
filesystem.wipefs(dev)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,409 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: flatpak
short_description: Manage flatpaks
description:
- Allows users to add or remove flatpaks.
- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
author:
- John Kwiatkoski (@JayKayy)
- Alexander Bethke (@oolongbrothers)
requirements:
- flatpak
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: partial
details:
- If O(state=latest), the module will always return C(changed=true).
diff_mode:
support: none
options:
executable:
description:
- The path to the C(flatpak) executable to use.
- By default, this module looks for the C(flatpak) executable on the path.
type: path
default: flatpak
method:
description:
- The installation method to use.
- Defines if the C(flatpak) is supposed to be installed globally for the whole V(system)
or only for the current V(user).
type: str
choices: [ system, user ]
default: system
name:
description:
- The name of the flatpak to manage. To operate on several packages this
can accept a list of packages.
- When used with O(state=present), O(name) can be specified as a URL to a
C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
- Both C(https://) and C(http://) URLs are supported.
- When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote
to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
- When used with O(state=absent) or O(state=latest), it is recommended to specify the name in
the reverse DNS format.
- When supplying a URL with O(state=absent) or O(state=latest), the module will try to match the
installed flatpak based on the name of the flatpakref to remove or update it. However, there
is no guarantee that the names of the flatpakref file and the reverse DNS name of the
installed flatpak do match.
type: list
elements: str
required: true
no_dependencies:
description:
- Whether or not installing runtime dependencies should be omitted.
- This parameter is primarily implemented for integration testing this module.
There might however be some use cases where you would want to have this, like when you are
packaging your own flatpaks.
type: bool
default: false
version_added: 3.2.0
remote:
description:
- The flatpak remote (repository) to install the flatpak from.
- By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before
you can use this.
- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
type: str
default: flathub
state:
description:
- Indicates the desired package state.
- The value V(latest) is supported since community.general 8.6.0.
choices: [ absent, present, latest ]
type: str
default: present
'''
EXAMPLES = r'''
- name: Install the spotify flatpak
community.general.flatpak:
name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
state: present
- name: Install the gedit flatpak package without dependencies (not recommended)
community.general.flatpak:
name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
state: present
no_dependencies: true
- name: Install the gedit package from flathub for current user
community.general.flatpak:
name: org.gnome.gedit
state: present
method: user
- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
community.general.flatpak:
name: org.gnome.Calendar
state: present
remote: gnome
- name: Install multiple packages
community.general.flatpak:
name:
- org.gimp.GIMP
- org.inkscape.Inkscape
- org.mozilla.firefox
- name: Update the spotify flatpak
community.general.flatpak:
name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
state: latest
- name: Update the gedit flatpak package without dependencies (not recommended)
community.general.flatpak:
name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
state: latest
no_dependencies: true
- name: Update the gedit package from flathub for current user
community.general.flatpak:
name: org.gnome.gedit
state: latest
method: user
- name: Update the Gnome Calendar flatpak from the gnome remote system-wide
community.general.flatpak:
name: org.gnome.Calendar
state: latest
remote: gnome
- name: Update multiple packages
community.general.flatpak:
name:
- org.gimp.GIMP
- org.inkscape.Inkscape
- org.mozilla.firefox
state: latest
- name: Remove the gedit flatpak
community.general.flatpak:
name: org.gnome.gedit
state: absent
- name: Remove multiple packages
community.general.flatpak:
name:
- org.gimp.GIMP
- org.inkscape.Inkscape
- org.mozilla.firefox
state: absent
'''
RETURN = r'''
command:
description: The exact flatpak command that was executed
returned: When a flatpak command has been executed
type: str
sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator"
msg:
description: Module error message
returned: failure
type: str
sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
rc:
description: Return code from flatpak binary
returned: When a flatpak command has been executed
type: int
sample: 0
stderr:
description: Error output from flatpak binary
returned: When a flatpak command has been executed
type: str
sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
stdout:
description: Output from flatpak binary
returned: When a flatpak command has been executed
type: str
sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
'''
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
def install_flat(module, binary, remote, names, method, no_dependencies):
"""Add new flatpaks."""
global result # pylint: disable=global-variable-not-assigned
uri_names = []
id_names = []
for name in names:
if name.startswith('http://') or name.startswith('https://'):
uri_names.append(name)
else:
id_names.append(name)
base_command = [binary, "install", "--{0}".format(method)]
flatpak_version = _flatpak_version(module, binary)
if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
base_command += ["-y"]
else:
base_command += ["--noninteractive"]
if no_dependencies:
base_command += ["--no-deps"]
if uri_names:
command = base_command + uri_names
_flatpak_command(module, module.check_mode, command)
if id_names:
command = base_command + [remote] + id_names
_flatpak_command(module, module.check_mode, command)
result['changed'] = True
def update_flat(module, binary, names, method, no_dependencies):
"""Update existing flatpaks."""
global result # pylint: disable=global-variable-not-assigned
installed_flat_names = [
_match_installed_flat_name(module, binary, name, method)
for name in names
]
command = [binary, "update", "--{0}".format(method)]
flatpak_version = _flatpak_version(module, binary)
if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
command += ["-y"]
else:
command += ["--noninteractive"]
if no_dependencies:
command += ["--no-deps"]
command += installed_flat_names
stdout = _flatpak_command(module, module.check_mode, command)
result["changed"] = (
True if module.check_mode else stdout.find("Nothing to do.") == -1
)
def uninstall_flat(module, binary, names, method):
"""Remove existing flatpaks."""
global result # pylint: disable=global-variable-not-assigned
installed_flat_names = [
_match_installed_flat_name(module, binary, name, method)
for name in names
]
command = [binary, "uninstall"]
flatpak_version = _flatpak_version(module, binary)
if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
command += ["-y"]
else:
command += ["--noninteractive"]
command += ["--{0}".format(method)] + installed_flat_names
_flatpak_command(module, module.check_mode, command)
result['changed'] = True
def flatpak_exists(module, binary, names, method):
"""Check if the flatpaks are installed."""
command = [binary, "list", "--{0}".format(method)]
output = _flatpak_command(module, False, command)
installed = []
not_installed = []
for name in names:
parsed_name = _parse_flatpak_name(name).lower()
if parsed_name in output.lower():
installed.append(name)
else:
not_installed.append(name)
return installed, not_installed
def _match_installed_flat_name(module, binary, name, method):
# This is a difficult function, since if the user supplies a flatpakref url,
# we have to rely on a naming convention:
# The flatpakref file name needs to match the flatpak name
global result # pylint: disable=global-variable-not-assigned
parsed_name = _parse_flatpak_name(name)
# Try running flatpak list with columns feature
command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
_flatpak_command(module, False, command, ignore_failure=True)
if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
# Probably flatpak before 1.2, which lacks --columns support
matched_flatpak_name = \
_match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
else:
# Probably flatpak >= 1.2
matched_flatpak_name = \
_match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
if matched_flatpak_name:
return matched_flatpak_name
else:
result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
"the name `{0}`. ".format(_parse_flatpak_name(name)) +\
"If you used a URL, try using the reverse DNS name of the flatpak"
module.fail_json(**result)
def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
global result  # pylint: disable=global-variable-not-assigned
command = [binary, "list", "--{0}".format(method), "--app"]
output = _flatpak_command(module, False, command)
for row in output.split('\n'):
# Pre-1.2 list output has several columns; match the name anywhere in the row
if parsed_name.lower() in row.lower():
return row.split()[0]
def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
global result  # pylint: disable=global-variable-not-assigned
command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
output = _flatpak_command(module, False, command)
for row in output.split('\n'):
# With --columns=application each row is exactly one application ID
if parsed_name.lower() == row.lower():
return row
def _parse_flatpak_name(name):
if name.startswith('http://') or name.startswith('https://'):
file_name = urlparse(name).path.split('/')[-1]
file_name_without_extension = file_name.split('.')[0:-1]
common_name = ".".join(file_name_without_extension)
else:
common_name = name
return common_name
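# Illustrative behaviour (hedged examples; example.com URL is hypothetical):
#   _parse_flatpak_name('https://example.com/repo/spotify.flatpakref') -> 'spotify'
#   _parse_flatpak_name('org.gnome.gedit')                             -> 'org.gnome.gedit'
# i.e. for URLs only the flatpakref file name (minus extension) is usable,
# which is why the docs recommend reverse DNS names for absent/latest.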
def _flatpak_version(module, binary):
global result # pylint: disable=global-variable-not-assigned
command = [binary, "--version"]
output = _flatpak_command(module, False, command)
version_number = output.split()[1]
return version_number
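# 'flatpak --version' prints something like 'Flatpak 1.14.4', so taking the
# second whitespace-separated token yields '1.14.4' for the LooseVersion
# comparisons above (version number shown is illustrative).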
def _flatpak_command(module, noop, command, ignore_failure=False):
global result # pylint: disable=global-variable-not-assigned
result['command'] = ' '.join(command)
if noop:
result['rc'] = 0
return ""
result['rc'], result['stdout'], result['stderr'] = module.run_command(
command, check_rc=not ignore_failure
)
return result['stdout']
def main():
# This module supports check mode
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', elements='str', required=True),
remote=dict(type='str', default='flathub'),
method=dict(type='str', default='system',
choices=['user', 'system']),
state=dict(type='str', default='present',
choices=['absent', 'present', 'latest']),
no_dependencies=dict(type='bool', default=False),
executable=dict(type='path', default='flatpak')
),
supports_check_mode=True,
)
name = module.params['name']
state = module.params['state']
remote = module.params['remote']
no_dependencies = module.params['no_dependencies']
method = module.params['method']
executable = module.params['executable']
binary = module.get_bin_path(executable, None)
global result
result = dict(
changed=False
)
# If the binary was not found, fail the operation
if not binary:
module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
installed, not_installed = flatpak_exists(module, binary, name, method)
if state == 'absent' and installed:
uninstall_flat(module, binary, installed, method)
else:
if state == 'latest' and installed:
update_flat(module, binary, installed, method, no_dependencies)
if state in ('present', 'latest') and not_installed:
install_flat(module, binary, remote, not_installed, method, no_dependencies)
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,273 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: flatpak_remote
short_description: Manage flatpak repository remotes
description:
- Allows users to add or remove flatpak remotes.
- The flatpak remotes concept is comparable to what is called repositories in other packaging
formats.
- Currently, remote addition is only supported via C(flatpakrepo) file URLs.
- Existing remotes will not be updated.
- See the M(community.general.flatpak) module for managing flatpaks.
author:
- John Kwiatkoski (@JayKayy)
- Alexander Bethke (@oolongbrothers)
requirements:
- flatpak
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
executable:
description:
- The path to the C(flatpak) executable to use.
- By default, this module looks for the C(flatpak) executable on the path.
type: str
default: flatpak
flatpakrepo_url:
description:
- The URL to the C(flatpakrepo) file representing the repository remote to add.
- When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url)
is added using the specified installation O(method).
- When used with O(state=absent), this is not required.
- Required when O(state=present).
type: str
method:
description:
- The installation method to use.
- Defines if the C(flatpak) is supposed to be installed globally for the whole V(system)
or only for the current V(user).
type: str
choices: [ system, user ]
default: system
name:
description:
- The desired name for the flatpak remote to be registered under on the managed host.
- When used with O(state=present), the remote will be added to the managed host under
the specified O(name).
- When used with O(state=absent) the remote with that name will be removed.
type: str
required: true
state:
description:
- Indicates the desired package state.
type: str
choices: [ absent, present ]
default: present
enabled:
description:
- Indicates whether this remote is enabled.
type: bool
default: true
version_added: 6.4.0
'''
EXAMPLES = r'''
- name: Add the Gnome flatpak remote to the system installation
community.general.flatpak_remote:
name: gnome
state: present
flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
- name: Add the flathub flatpak repository remote to the user installation
community.general.flatpak_remote:
name: flathub
state: present
flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
method: user
- name: Remove the Gnome flatpak remote from the user installation
community.general.flatpak_remote:
name: gnome
state: absent
method: user
- name: Remove the flathub remote from the system installation
community.general.flatpak_remote:
name: flathub
state: absent
- name: Disable the flathub remote in the system installation
community.general.flatpak_remote:
name: flathub
state: present
enabled: false
'''
RETURN = r'''
command:
description: The exact flatpak command that was executed
returned: When a flatpak command has been executed
type: str
sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
msg:
description: Module error message
returned: failure
type: str
sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
rc:
description: Return code from flatpak binary
returned: When a flatpak command has been executed
type: int
sample: 0
stderr:
description: Error output from flatpak binary
returned: When a flatpak command has been executed
type: str
sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
stdout:
description: Output from flatpak binary
returned: When a flatpak command has been executed
type: str
sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
def add_remote(module, binary, name, flatpakrepo_url, method):
"""Add a new remote."""
global result # pylint: disable=global-variable-not-assigned
command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
_flatpak_command(module, module.check_mode, command)
result['changed'] = True
def remove_remote(module, binary, name, method):
"""Remove an existing remote."""
global result # pylint: disable=global-variable-not-assigned
command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
_flatpak_command(module, module.check_mode, command)
result['changed'] = True
def remote_exists(module, binary, name, method):
"""Check if the remote exists."""
command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)]
# The query operation for the remote needs to be run even in check mode
output = _flatpak_command(module, False, command)
for line in output.splitlines():
listed_remote = line.split()
if len(listed_remote) == 0:
continue
if listed_remote[0] == to_native(name):
return True
return False
def enable_remote(module, binary, name, method):
"""Enable a remote."""
global result # pylint: disable=global-variable-not-assigned
command = [binary, "remote-modify", "--enable", "--{0}".format(method), name]
_flatpak_command(module, module.check_mode, command)
result['changed'] = True
def disable_remote(module, binary, name, method):
"""Disable a remote."""
global result # pylint: disable=global-variable-not-assigned
command = [binary, "remote-modify", "--disable", "--{0}".format(method), name]
_flatpak_command(module, module.check_mode, command)
result['changed'] = True
def remote_enabled(module, binary, name, method):
"""Check if the remote is enabled."""
command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)]
# The query operation for the remote needs to be run even in check mode
output = _flatpak_command(module, False, command)
for line in output.splitlines():
listed_remote = line.split()
if len(listed_remote) == 0:
continue
if listed_remote[0] == to_native(name):
return len(listed_remote) == 1 or "disabled" not in listed_remote[1].split(",")
return False
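# Illustrative 'remote-list --show-disabled' rows matched above (hedged;
# exact columns vary by flatpak version):
#   flathub  system           -> enabled ('disabled' absent from options column)
#   flathub  system,disabled  -> disabled
# A row with only the name column is treated as enabled as well.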
def _flatpak_command(module, noop, command):
global result # pylint: disable=global-variable-not-assigned
result['command'] = ' '.join(command)
if noop:
result['rc'] = 0
return ""
result['rc'], result['stdout'], result['stderr'] = module.run_command(
command, check_rc=True
)
return result['stdout']
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
flatpakrepo_url=dict(type='str'),
method=dict(type='str', default='system',
choices=['user', 'system']),
state=dict(type='str', default="present",
choices=['absent', 'present']),
enabled=dict(type='bool', default=True),
executable=dict(type='str', default="flatpak")
),
# This module supports check mode
supports_check_mode=True,
)
name = module.params['name']
flatpakrepo_url = module.params['flatpakrepo_url']
method = module.params['method']
state = module.params['state']
enabled = module.params['enabled']
executable = module.params['executable']
binary = module.get_bin_path(executable, None)
if flatpakrepo_url is None:
flatpakrepo_url = ''
global result
result = dict(
changed=False
)
# If the binary was not found, fail the operation
if not binary:
module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
if state == 'present' and not remote_already_exists:
add_remote(module, binary, name, flatpakrepo_url, method)
elif state == 'absent' and remote_already_exists:
remove_remote(module, binary, name, method)
if state == 'present':
remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method)
if enabled and not remote_already_enabled:
enable_remote(module, binary, name, method)
if not enabled and remote_already_enabled:
disable_remote(module, binary, name, method)
module.exit_json(**result)
if __name__ == '__main__':
main()

Some files were not shown because too many files have changed in this diff.