update collections/requirements.yml
This commit is contained in:
@ -0,0 +1,95 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is based on
|
||||
# Lib/posixpath.py of cpython
|
||||
# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
||||
#
|
||||
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
|
||||
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
|
||||
# otherwise using this software ("Python") in source or binary form and
|
||||
# its associated documentation.
|
||||
#
|
||||
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
||||
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
||||
# analyze, test, perform and/or display publicly, prepare derivative works,
|
||||
# distribute, and otherwise use Python alone or in any derivative version,
|
||||
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
||||
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
|
||||
# are retained in Python alone or in any derivative version prepared by Licensee.
|
||||
#
|
||||
# 3. In the event Licensee prepares a derivative work that is based on
|
||||
# or incorporates Python or any part thereof, and wants to make
|
||||
# the derivative work available to others as provided herein, then
|
||||
# Licensee hereby agrees to include in any such work a brief summary of
|
||||
# the changes made to Python.
|
||||
#
|
||||
# 4. PSF is making Python available to Licensee on an "AS IS"
|
||||
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
||||
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
||||
# INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
#
|
||||
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
||||
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
||||
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
||||
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
#
|
||||
# 6. This License Agreement will automatically terminate upon a material
|
||||
# breach of its terms and conditions.
|
||||
#
|
||||
# 7. Nothing in this License Agreement shall be deemed to create any
|
||||
# relationship of agency, partnership, or joint venture between PSF and
|
||||
# Licensee. This License Agreement does not grant permission to use PSF
|
||||
# trademarks or trade name in a trademark sense to endorse or promote
|
||||
# products or services of Licensee, or any third party.
|
||||
#
|
||||
# 8. By copying, installing or otherwise using Python, Licensee
|
||||
# agrees to be bound by the terms and conditions of this License
|
||||
# Agreement.
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def ismount(path):
    """Test whether *path* is a mount point.

    This is a copy of the upstream version of ismount(). Originally this was
    copied here as a workaround until Python issue 2466 was fixed. Now it is
    here so this will work on older versions of Python that may not have the
    upstream fix.
    https://github.com/ansible/ansible-modules-core/issues/2186
    http://bugs.python.org/issue2466
    """
    try:
        path_stat = os.lstat(path)
    except (OSError, ValueError):
        # It doesn't exist -- so not a mount point. :-)
        return False
    # A symlink can never be a mount point.
    if os.path.stat.S_ISLNK(path_stat.st_mode):
        return False

    # Build path/.. using the same str/bytes flavour as the input.
    updir = b'..' if isinstance(path, bytes) else '..'
    parent = os.path.realpath(os.path.join(path, updir))
    try:
        parent_stat = os.lstat(parent)
    except (OSError, ValueError):
        return False

    if path_stat.st_dev != parent_stat.st_dev:
        # path/.. lives on a different device: path is a mount point.
        return True
    # path/.. resolving to the same i-node means path is the fs root.
    return path_stat.st_ino == parent_stat.st_ino
|
@ -0,0 +1,343 @@
|
||||
# Vendored copy of distutils/version.py from CPython 3.9.5
|
||||
#
|
||||
# Implements multiple version numbering conventions for the
|
||||
# Python Module Distribution Utilities.
|
||||
#
|
||||
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
|
||||
#
|
||||
|
||||
"""Provides classes to represent module version numbers (one class for
|
||||
each style of version numbering). There are currently two such classes
|
||||
implemented: StrictVersion and LooseVersion.
|
||||
|
||||
Every version number class implements the following interface:
|
||||
* the 'parse' method takes a string and parses it to some internal
|
||||
representation; if the string is an invalid version number,
|
||||
'parse' raises a ValueError exception
|
||||
* the class constructor takes an optional string argument which,
|
||||
if supplied, is passed to 'parse'
|
||||
* __str__ reconstructs the string that was passed to 'parse' (or
|
||||
an equivalent string -- ie. one that will generate an equivalent
|
||||
version number instance)
|
||||
* __repr__ generates Python code to recreate the version number instance
|
||||
* _cmp compares the current instance with either another instance
|
||||
of the same class or a string (which will be parsed to an instance
|
||||
of the same class, thus must follow the same rules)
|
||||
"""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
|
||||
# re.ASCII only exists on Python 3; on interpreters without it, fall back
# to VERBOSE alone (Python 2 patterns are ASCII-only by default anyway).
try:
    RE_FLAGS = re.VERBOSE | re.ASCII
except AttributeError:
    RE_FLAGS = re.VERBOSE
|
||||
|
||||
|
||||
class Version:
    """Abstract base class for version numbering classes.

    Supplies the constructor (__init__), the reproducer (__repr__) and the
    rich comparison operators, which all route through _cmp(). Concrete
    subclasses must provide parse(), __str__() and _cmp().
    """

    def __init__(self, vstring=None):
        # A falsy vstring (None or '') leaves the instance unparsed.
        if vstring:
            self.parse(vstring)

    def __repr__(self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome == 0

    def __lt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome < 0

    def __le__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome <= 0

    def __gt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome > 0

    def __ge__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome >= 0
|
||||
|
||||
|
||||
# Interface for version-number classes -- must be implemented
|
||||
# by the following classes (the concrete ones -- Version should
|
||||
# be treated as an abstract class).
|
||||
# __init__ (string) - create and take same action as 'parse'
|
||||
# (string parameter is optional)
|
||||
# parse (string) - convert a string representation to whatever
|
||||
# internal representation is appropriate for
|
||||
# this style of version numbering
|
||||
# __str__ (self) - convert back to a string; should be very similar
|
||||
# (if not identical to) the string supplied to parse
|
||||
# __repr__ (self) - generate Python code to recreate
|
||||
# the instance
|
||||
# _cmp (self, other) - compare two version numbers ('other' may
|
||||
# be an unparsed version string, or another
|
||||
# instance of your version class)
|
||||
|
||||
|
||||
class StrictVersion(Version):
    """Version numbering for anal retentives and software idealists.

    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.

    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):

        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4

    The following are examples of invalid version numbers:

        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4

    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """

    # Two or three dot-separated numeric fields plus an optional 'a'/'b'
    # pre-release suffix; RE_FLAGS keeps \d matching ASCII-only where
    # the interpreter supports re.ASCII.
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            RE_FLAGS)

    def parse(self, vstring):
        """Parse *vstring* into self.version (a 3-tuple of ints) and
        self.prerelease (a ('a'|'b', int) pair, or None).

        Raises ValueError when *vstring* does not match version_re.
        """
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError("invalid version number '%s'" % vstring)

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        if patch:
            self.version = tuple(map(int, [major, minor, patch]))
        else:
            # A missing patch level is normalized to 0, so "1.2" == "1.2.0".
            self.version = tuple(map(int, [major, minor])) + (0,)

        if prerelease:
            # prerelease[0] is the tag letter ('a' or 'b'); the number
            # follows as a separate regex group.
            self.prerelease = (prerelease[0], int(prerelease_num))
        else:
            self.prerelease = None

    def __str__(self):
        """Reconstruct a canonical string (a trailing '.0' is dropped)."""
        if self.version[2] == 0:
            vstring = '.'.join(map(str, self.version[0:2]))
        else:
            vstring = '.'.join(map(str, self.version))

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def _cmp(self, other):
        """Return -1/0/1 comparing self with *other* (a str is parsed as a
        StrictVersion first); NotImplemented for any other type.
        """
        if isinstance(other, str):
            other = StrictVersion(other)
        elif not isinstance(other, StrictVersion):
            return NotImplemented

        if self.version != other.version:
            # numeric versions don't match
            # prerelease stuff doesn't matter
            if self.version < other.version:
                return -1
            else:
                return 1

        # have to compare prerelease
        # case 1: neither has prerelease; they're equal
        # case 2: self has prerelease, other doesn't; other is greater
        # case 3: self doesn't have prerelease, other does: self is greater
        # case 4: both have prerelease: must compare them!

        if (not self.prerelease and not other.prerelease):
            return 0
        elif (self.prerelease and not other.prerelease):
            return -1
        elif (not self.prerelease and other.prerelease):
            return 1
        elif (self.prerelease and other.prerelease):
            if self.prerelease == other.prerelease:
                return 0
            elif self.prerelease < other.prerelease:
                return -1
            else:
                return 1
        else:
            raise AssertionError("never get here")
|
||||
|
||||
# end class StrictVersion
|
||||
|
||||
# The rules according to Greg Stein:
|
||||
# 1) a version number has 1 or more numbers separated by a period or by
|
||||
# sequences of letters. If only periods, then these are compared
|
||||
# left-to-right to determine an ordering.
|
||||
# 2) sequences of letters are part of the tuple for comparison and are
|
||||
# compared lexicographically
|
||||
# 3) recognize the numeric components may have leading zeroes
|
||||
#
|
||||
# The LooseVersion class below implements these rules: a version number
|
||||
# string is split up into a tuple of integer and string components, and
|
||||
# comparison is a simple tuple comparison. This means that version
|
||||
# numbers behave in a predictable and obvious way, but a way that might
|
||||
# not necessarily be how people *want* version numbers to behave. There
|
||||
# wouldn't be a problem if people could stick to purely numeric version
|
||||
# numbers: just split on period and compare the numbers as tuples.
|
||||
# However, people insist on putting letters into their version numbers;
|
||||
# the most common purpose seems to be:
|
||||
# - indicating a "pre-release" version
|
||||
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
|
||||
# - indicating a post-release patch ('p', 'pl', 'patch')
|
||||
# but of course this can't cover all version number schemes, and there's
|
||||
# no way to know what a programmer means without asking him.
|
||||
#
|
||||
# The problem is what to do with letters (and other non-numeric
|
||||
# characters) in a version number. The current implementation does the
|
||||
# obvious and predictable thing: keep them as strings and compare
|
||||
# lexically within a tuple comparison. This has the desired effect if
|
||||
# an appended letter sequence implies something "post-release":
|
||||
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
|
||||
#
|
||||
# However, if letters in a version number imply a pre-release version,
|
||||
# the "obvious" thing isn't correct. Eg. you would expect that
|
||||
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
|
||||
# implemented here, this just isn't so.
|
||||
#
|
||||
# Two possible solutions come to mind. The first is to tie the
|
||||
# comparison algorithm to a particular set of semantic rules, as has
|
||||
# been done in the StrictVersion class above. This works great as long
|
||||
# as everyone can go along with bondage and discipline. Hopefully a
|
||||
# (large) subset of Python module programmers will agree that the
|
||||
# particular flavour of bondage and discipline provided by StrictVersion
|
||||
# provides enough benefit to be worth using, and will submit their
|
||||
# version numbering scheme to its domination. The free-thinking
|
||||
# anarchists in the lot will never give in, though, and something needs
|
||||
# to be done to accommodate them.
|
||||
#
|
||||
# Perhaps a "moderately strict" version class could be implemented that
|
||||
# lets almost anything slide (syntactically), and makes some heuristic
|
||||
# assumptions about non-digits in version number strings. This could
|
||||
# sink into special-case-hell, though; if I was as talented and
|
||||
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
|
||||
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
|
||||
# just as happy dealing with things like "2g6" and "1.13++". I don't
|
||||
# think I'm smart enough to do it right though.
|
||||
#
|
||||
# In any case, I've coded the test suite for this module (see
|
||||
# ../test/test_version.py) specifically to fail on things like comparing
|
||||
# "1.2a2" and "1.2". That's not because the *code* is doing anything
|
||||
# wrong, it's because the simple, obvious design doesn't match my
|
||||
# complicated, hairy expectations for real-world version numbers. It
|
||||
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
|
||||
# the Right Thing" (ie. the code matches the conception). But I'd rather
|
||||
# have a conception that matches common notions about version numbers.
|
||||
|
||||
|
||||
class LooseVersion(Version):
    """Version numbering for anarchists and software realists.

    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:

        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0

    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """

    # Splits a version string into runs of digits, runs of lowercase
    # letters, and the '.' separators (discarded by parse()).
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__(self, vstring=None):
        # A falsy vstring leaves the instance unparsed (no attributes set).
        if vstring:
            self.parse(vstring)

    def parse(self, vstring):
        """Parse *vstring* into self.version, a list of ints and strings."""
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = [x for x in self.component_re.split(vstring) if x and x != '.']
        for i, obj in enumerate(components):
            try:
                # Numeric runs compare numerically; anything else stays a
                # string and compares lexically.
                components[i] = int(obj)
            except ValueError:
                pass

        self.version = components

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "LooseVersion ('%s')" % str(self)

    def _cmp(self, other):
        """Return -1/0/1 comparing self with *other* (a str is parsed as a
        LooseVersion first); NotImplemented for any other type.

        NOTE(review): on Python 3, comparing versions whose component
        lists mix an int and a str at the same position raises TypeError.
        """
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented

        if self.version == other.version:
            return 0
        if self.version < other.version:
            return -1
        if self.version > other.version:
            return 1
|
||||
|
||||
# end class LooseVersion
|
@ -0,0 +1,287 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import json
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
|
||||
try:
|
||||
import footmark
|
||||
import footmark.ecs
|
||||
import footmark.slb
|
||||
import footmark.vpc
|
||||
import footmark.rds
|
||||
import footmark.ess
|
||||
import footmark.sts
|
||||
import footmark.dns
|
||||
import footmark.ram
|
||||
import footmark.market
|
||||
HAS_FOOTMARK = True
|
||||
except ImportError:
|
||||
HAS_FOOTMARK = False
|
||||
|
||||
|
||||
class AnsibleACSError(Exception):
    """Raised when a connection to an Alicloud (ACS) service cannot be made."""
    pass
|
||||
|
||||
|
||||
def acs_common_argument_spec():
    """Return the credential argument spec shared by all Alicloud modules."""
    spec = {}
    spec['alicloud_access_key'] = dict(aliases=['access_key_id', 'access_key'], no_log=True,
                                       fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID']))
    spec['alicloud_secret_key'] = dict(aliases=['secret_access_key', 'secret_key'], no_log=True,
                                       fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY']))
    spec['alicloud_security_token'] = dict(aliases=['security_token'], no_log=True,
                                           fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN']))
    spec['ecs_role_name'] = dict(aliases=['role_name'], fallback=(env_fallback, ['ALICLOUD_ECS_ROLE_NAME']))
    return spec
|
||||
|
||||
|
||||
def ecs_argument_spec():
    """Return the common credential spec extended with region, assume-role
    and shared-profile options."""
    spec = acs_common_argument_spec()
    spec['alicloud_region'] = dict(required=True, aliases=['region', 'region_id'],
                                   fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID']))
    spec['alicloud_assume_role_arn'] = dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_ARN']),
                                            aliases=['assume_role_arn'])
    spec['alicloud_assume_role_session_name'] = dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_SESSION_NAME']),
                                                     aliases=['assume_role_session_name'])
    spec['alicloud_assume_role_session_expiration'] = dict(type='int',
                                                           fallback=(env_fallback,
                                                                     ['ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION']),
                                                           aliases=['assume_role_session_expiration'])
    spec['alicloud_assume_role'] = dict(type='dict', aliases=['assume_role'])
    spec['profile'] = dict(fallback=(env_fallback, ['ALICLOUD_PROFILE']))
    spec['shared_credentials_file'] = dict(fallback=(env_fallback, ['ALICLOUD_SHARED_CREDENTIALS_FILE']))
    return spec
|
||||
|
||||
|
||||
def get_acs_connection_info(params):
    """Map Ansible module params onto the keyword arguments footmark's
    connect_to_region() expects, and return them as a dict."""
    return {
        'acs_access_key_id': params.get('alicloud_access_key'),
        'acs_secret_access_key': params.get('alicloud_secret_key'),
        'security_token': params.get('alicloud_security_token'),
        'ecs_role_name': params.get('ecs_role_name'),
        'user_agent': 'Ansible-Provider-Alicloud',
    }
|
||||
|
||||
|
||||
def connect_to_acs(acs_module, region, **params):
    """Connect the given footmark module to *region* and return the connection.

    Raises AnsibleACSError when no connection could be established,
    distinguishing an unknown region from other connection failures.
    """
    connection = acs_module.connect_to_region(region, **params)
    if connection:
        return connection
    # Only probe the region list on failure, to produce a precise error.
    known_region_ids = [r.id for r in acs_module.regions()]
    if region not in known_region_ids:
        raise AnsibleACSError(
            "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
    raise AnsibleACSError(
        "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
|
||||
|
||||
|
||||
def get_assume_role(params):
    """Assume a RAM role via STS and return connection params that carry
    the temporary credentials.

    Role settings come either from the flat alicloud_assume_role_* params
    or from the nested params['assume_role'] dict; the flat keys win when
    both are present.  The temporary key/secret/token returned by STS are
    copied into the dict produced by get_acs_connection_info().
    """
    sts_params = get_acs_connection_info(params)
    assume_role = {}
    if params.get('assume_role'):
        assume_role['alicloud_assume_role_arn'] = params['assume_role'].get('role_arn')
        assume_role['alicloud_assume_role_session_name'] = params['assume_role'].get('session_name')
        assume_role['alicloud_assume_role_session_expiration'] = params['assume_role'].get('session_expiration')
        assume_role['alicloud_assume_role_policy'] = params['assume_role'].get('policy')

    # Duration falls back to 3600 seconds only when the nested dict has no
    # 'session_expiration' key at all (a key present with value None stays
    # None -- presumably intentional; verify against footmark's defaults).
    assume_role_params = {
        'role_arn': params.get('alicloud_assume_role_arn') if params.get('alicloud_assume_role_arn') else assume_role.get('alicloud_assume_role_arn'),
        'role_session_name': params.get('alicloud_assume_role_session_name') if params.get('alicloud_assume_role_session_name')
        else assume_role.get('alicloud_assume_role_session_name'),
        'duration_seconds': params.get('alicloud_assume_role_session_expiration') if params.get('alicloud_assume_role_session_expiration')
        else assume_role.get('alicloud_assume_role_session_expiration', 3600),
        'policy': assume_role.get('alicloud_assume_role_policy', {})
    }

    try:
        sts = connect_to_acs(footmark.sts, params.get('alicloud_region'), **sts_params).assume_role(**assume_role_params).read()
        sts_params['acs_access_key_id'], sts_params['acs_secret_access_key'], sts_params['security_token'] \
            = sts['access_key_id'], sts['access_key_secret'], sts['security_token']
    except AnsibleACSError as e:
        # NOTE(review): *params* is a plain dict (module.params) and has no
        # fail_json attribute, so this error path would raise
        # AttributeError -- presumably module.fail_json was intended.
        params.fail_json(msg=str(e))
    return sts_params
|
||||
|
||||
|
||||
def get_profile(params):
    """Resolve credentials and return footmark connection parameters.

    When no explicit access key or ECS role is given but a profile name
    is, the profile is looked up in the shared credentials file
    (params['shared_credentials_file'], else ~/.aliyun/config.json) and
    its 'mode' decides how credentials are derived (AK / StsToken /
    EcsRamRole / RamRoleArn).  Otherwise the function falls back to
    assume-role params or plain connection info.  *params* is mutated in
    place before being converted, so callers holding module.params see
    e.g. alicloud_region filled in from the profile.
    """
    if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']:
        # NOTE(review): os.getenv('HOME') can be None (HOME unset), which
        # would make the concatenation raise TypeError;
        # os.path.expanduser('~') would be safer -- confirm before changing.
        path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.getenv('HOME') + '/.aliyun/config.json'
        auth = {}
        with open(path, 'r') as f:
            # The last profile with a matching name wins.
            for pro in json.load(f)['profiles']:
                if params['profile'] == pro['name']:
                    auth = pro
        if auth:
            if auth['mode'] == 'AK' and auth.get('access_key_id') and auth.get('access_key_secret'):
                params['alicloud_access_key'] = auth.get('access_key_id')
                params['alicloud_secret_key'] = auth.get('access_key_secret')
                params['alicloud_region'] = auth.get('region_id')
                params = get_acs_connection_info(params)
            elif auth['mode'] == 'StsToken' and auth.get('access_key_id') and auth.get('access_key_secret') and auth.get('sts_token'):
                params['alicloud_access_key'] = auth.get('access_key_id')
                params['alicloud_secret_key'] = auth.get('access_key_secret')
                params['security_token'] = auth.get('sts_token')
                params['alicloud_region'] = auth.get('region_id')
                params = get_acs_connection_info(params)
            elif auth['mode'] == 'EcsRamRole':
                params['ecs_role_name'] = auth.get('ram_role_name')
                params['alicloud_region'] = auth.get('region_id')
                params = get_acs_connection_info(params)
            elif auth['mode'] == 'RamRoleArn' and auth.get('ram_role_arn'):
                params['alicloud_access_key'] = auth.get('access_key_id')
                params['alicloud_secret_key'] = auth.get('access_key_secret')
                params['security_token'] = auth.get('sts_token')
                params['ecs_role_name'] = auth.get('ram_role_name')
                params['alicloud_assume_role_arn'] = auth.get('ram_role_arn')
                params['alicloud_assume_role_session_name'] = auth.get('ram_session_name')
                params['alicloud_assume_role_session_expiration'] = auth.get('expired_seconds')
                params['alicloud_region'] = auth.get('region_id')
                params = get_assume_role(params)
            # NOTE(review): a matched profile whose mode hits none of the
            # branches above returns *params* unconverted.
    elif params.get('alicloud_assume_role_arn') or params.get('assume_role'):
        params = get_assume_role(params)
    else:
        params = get_acs_connection_info(params)
    return params
|
||||
|
||||
|
||||
def ecs_connect(module):
    """Return a footmark ECS connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.ecs connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    ecs_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return ecs' with 'ecs'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for ecs connection.')
    try:
        return connect_to_acs(footmark.ecs, region, **ecs_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def slb_connect(module):
    """Return a footmark SLB connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.slb connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    slb_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return slb' with 'slb'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for slb connection.')
    try:
        return connect_to_acs(footmark.slb, region, **slb_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def dns_connect(module):
    """Return a footmark DNS connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.dns connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    dns_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return dns' with 'dns'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for dns connection.')
    try:
        return connect_to_acs(footmark.dns, region, **dns_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def vpc_connect(module):
    """Return a footmark VPC connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.vpc connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    vpc_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return vpc' with 'vpc'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for vpc connection.')
    try:
        return connect_to_acs(footmark.vpc, region, **vpc_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def rds_connect(module):
    """Return a footmark RDS connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.rds connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    rds_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return rds' with 'rds'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for rds connection.')
    try:
        return connect_to_acs(footmark.rds, region, **rds_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def ess_connect(module):
    """Return a footmark ESS connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.ess connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    ess_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return ess' with 'ess'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for ess connection.')
    try:
        return connect_to_acs(footmark.ess, region, **ess_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def sts_connect(module):
    """Return a footmark STS connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.sts connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    sts_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return sts' with 'sts'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for sts connection.')
    try:
        return connect_to_acs(footmark.sts, region, **sts_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def ram_connect(module):
    """Return a footmark RAM connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.ram connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    ram_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return ram' with 'ram'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for ram connection.')
    try:
        return connect_to_acs(footmark.ram, region, **ram_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def market_connect(module):
    """Return a footmark Market connection for the module's region.

    :param module: AnsibleModule carrying Alicloud credentials in params.
    :returns: a connected footmark.market connection object.

    Fails the module (fail_json) when no region is configured or the
    connection attempt raises AnsibleACSError.
    """
    market_params = get_profile(module.params)
    # get_profile() may populate alicloud_region from a credentials
    # profile, so the region must be read only after it has run.
    region = module.params.get('alicloud_region')
    if not region:
        # Previously control fell through to 'return market' with 'market'
        # unbound, raising an opaque UnboundLocalError.
        module.fail_json(msg='An alicloud region is required for market connection.')
    try:
        return connect_to_acs(footmark.market, region, **market_params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
|
@ -0,0 +1,209 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
"""
|
||||
This module adds shared support for generic cloud modules
|
||||
|
||||
In order to use this module, include it as part of a custom
|
||||
module as shown below.
|
||||
|
||||
from ansible.module_utils.cloud import CloudRetry
|
||||
|
||||
The 'cloud' module provides the following common classes:
|
||||
|
||||
* CloudRetry
|
||||
- The base class to be used by other cloud providers, in order to
|
||||
provide a backoff/retry decorator based on status codes.
|
||||
|
||||
- Example using the AWSRetry class which inherits from CloudRetry.
|
||||
|
||||
@AWSRetry.exponential_backoff(retries=10, delay=3)
|
||||
get_ec2_security_group_ids_from_names()
|
||||
|
||||
@AWSRetry.jittered_backoff()
|
||||
get_ec2_security_group_ids_from_names()
|
||||
|
||||
"""
|
||||
import random
|
||||
from functools import wraps
|
||||
import syslog
|
||||
import time
|
||||
|
||||
|
||||
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
|
||||
""" Customizable exponential backoff strategy.
|
||||
Args:
|
||||
retries (int): Maximum number of times to retry a request.
|
||||
delay (float): Initial (base) delay.
|
||||
backoff (float): base of the exponent to use for exponential
|
||||
backoff.
|
||||
max_delay (int): Optional. If provided each delay generated is capped
|
||||
at this amount. Defaults to 60 seconds.
|
||||
Returns:
|
||||
Callable that returns a generator. This generator yields durations in
|
||||
seconds to be used as delays for an exponential backoff strategy.
|
||||
Usage:
|
||||
>>> backoff = _exponential_backoff()
|
||||
>>> backoff
|
||||
<function backoff_backoff at 0x7f0d939facf8>
|
||||
>>> list(backoff())
|
||||
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
|
||||
"""
|
||||
def backoff_gen():
|
||||
for retry in range(0, retries):
|
||||
sleep = delay * backoff ** retry
|
||||
yield sleep if max_delay is None else min(sleep, max_delay)
|
||||
return backoff_gen
|
||||
|
||||
|
||||
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
|
||||
""" Implements the "Full Jitter" backoff strategy described here
|
||||
https://www.awsarchitectureblog.com/2015/03/backoff.html
|
||||
Args:
|
||||
retries (int): Maximum number of times to retry a request.
|
||||
delay (float): Approximate number of seconds to sleep for the first
|
||||
retry.
|
||||
max_delay (int): The maximum number of seconds to sleep for any retry.
|
||||
_random (random.Random or None): Makes this generator testable by
|
||||
allowing developers to explicitly pass in the a seeded Random.
|
||||
Returns:
|
||||
Callable that returns a generator. This generator yields durations in
|
||||
seconds to be used as delays for a full jitter backoff strategy.
|
||||
Usage:
|
||||
>>> backoff = _full_jitter_backoff(retries=5)
|
||||
>>> backoff
|
||||
<function backoff_backoff at 0x7f0d939facf8>
|
||||
>>> list(backoff())
|
||||
[3, 6, 5, 23, 38]
|
||||
>>> list(backoff())
|
||||
[2, 1, 6, 6, 31]
|
||||
"""
|
||||
def backoff_gen():
|
||||
for retry in range(0, retries):
|
||||
yield _random.randint(0, min(max_delay, delay * 2 ** retry))
|
||||
return backoff_gen
|
||||
|
||||
|
||||
class CloudRetry(object):
    """Generic retry helper usable by any cloud provider.

    Subclasses declare which exception type is potentially retryable
    (``base_class``), how to extract a status code from it
    (``status_code_from_exception``) and which status codes warrant a retry
    (``found``).  The classmethod decorators then retry the wrapped callable
    according to a chosen backoff strategy.
    """

    # Base class of retryable exceptions; for AWS this would be
    # botocore.exceptions.ClientError.
    base_class = None

    @staticmethod
    def status_code_from_exception(error):
        """Return the status code carried by *error*.

        Args:
            error (object): The exception itself.

        Implemented by subclasses.
        """
        pass

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        """Return True when *response_code* should trigger a retry.

        Args:
            response_code (str): The response code being matched against.

        Implemented by subclasses.
        """
        pass

    @classmethod
    def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
        """Build a decorator retrying the wrapped callable.

        Args:
            backoff_strategy (callable): Returns a generator yielding the
                sleep duration (seconds) for each retry of the decorated
                function.
        """
        def deco(f):
            @wraps(f)
            def retry_func(*args, **kwargs):
                for delay in backoff_strategy():
                    try:
                        return f(*args, **kwargs)
                    except Exception as e:
                        # Only exceptions of the configured base class whose
                        # status code is recognised as retryable are retried;
                        # everything else propagates unchanged.
                        if not isinstance(e, cls.base_class):  # pylint: disable=isinstance-second-argument-not-valid-type
                            raise e
                        if not cls.found(cls.status_code_from_exception(e), catch_extra_error_codes):
                            raise e
                        syslog.syslog(
                            syslog.LOG_INFO,
                            "{0}: Retrying in {1} seconds...".format(str(e), delay))
                        time.sleep(delay)
                # Backoff exhausted: one final attempt, letting any error escape.
                return f(*args, **kwargs)

            return retry_func  # true decorator

        return deco

    @classmethod
    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
        """Retry the decorated callable using exponential backoff.

        Kwargs:
            retries (int): Number of times to retry a failed request before
                giving up (default 10).
            delay (int or float): Initial delay between retries in seconds
                (default 3).
            backoff (int or float): Backoff multiplier, e.g. 2 doubles the
                delay each retry.
            max_delay (int or None): Maximum wait between retries (default 60).
        """
        return cls._backoff(
            _exponential_backoff(retries=retries, delay=delay, backoff=backoff, max_delay=max_delay),
            catch_extra_error_codes)

    @classmethod
    def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
        """Retry the decorated callable using a jittered backoff strategy.

        More on this strategy:
        https://www.awsarchitectureblog.com/2015/03/backoff.html

        Kwargs:
            retries (int): Number of times to retry a failed request before
                giving up (default 10).
            delay (int): Initial delay between retries in seconds (default 3).
            max_delay (int): Maximum wait between retries (default 60).
        """
        return cls._backoff(
            _full_jitter_backoff(retries=retries, delay=delay, max_delay=max_delay),
            catch_extra_error_codes)

    @classmethod
    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
        """Compatibility wrapper for the original CloudRetry.backoff API.

        Prefer CloudRetry.exponential_backoff in new code.  Note *tries*
        counts total attempts (not retries), hence ``retries=tries - 1``.

        Kwargs:
            tries (int): Number of times to try before giving up (default 10).
            delay (int or float): Initial delay between retries in seconds
                (default 3).
            backoff (int or float): Backoff multiplier (default 1.1).
        """
        return cls.exponential_backoff(
            retries=tries - 1, delay=delay, backoff=backoff, max_delay=None,
            catch_extra_error_codes=catch_extra_error_codes)
|
@ -0,0 +1,67 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
|
||||
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import csv
|
||||
from io import BytesIO, StringIO
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six import PY3
|
||||
|
||||
|
||||
class CustomDialectFailureError(Exception):
    """Raised when a user-supplied custom CSV dialect cannot be registered."""
    pass
|
||||
|
||||
|
||||
class DialectNotAvailableError(Exception):
    """Raised when the requested CSV dialect is unknown to this Python."""
    pass
|
||||
|
||||
|
||||
CSVError = csv.Error
|
||||
|
||||
|
||||
def initialize_dialect(dialect, **kwargs):
    """Validate *dialect* and register a 'custom' dialect from set options.

    Ensures the Python 3 "unix" dialect is always available, verifies the
    requested dialect name, and — if any keyword option is not None —
    registers a derived dialect under the name 'custom'.

    Returns the dialect name to use ('custom' when options were supplied).
    Raises DialectNotAvailableError for unknown dialect names and
    CustomDialectFailureError when registration fails.
    """
    # Make the Python 3 "unix" dialect available under that name.
    class unix_dialect(csv.Dialect):
        """Describe the usual properties of Unix-generated CSV files."""
        delimiter = ','
        quotechar = '"'
        doublequote = True
        skipinitialspace = False
        lineterminator = '\n'
        quoting = csv.QUOTE_ALL

    csv.register_dialect("unix", unix_dialect)

    if dialect not in csv.list_dialects():
        raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect)

    # Keep only the options the caller actually set.
    dialect_params = {key: value for key, value in kwargs.items() if value is not None}
    if not dialect_params:
        return dialect

    try:
        csv.register_dialect('custom', dialect, **dialect_params)
    except TypeError as e:
        raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
    return 'custom'
|
||||
|
||||
|
||||
def read_csv(data, dialect, fieldnames=None):
    """Return a csv.DictReader over *data* using *dialect*.

    The raw input is first normalised with to_native, then wrapped in an
    in-memory file object (StringIO on Python 3, BytesIO on Python 2) for
    the reader to consume.
    """
    text = to_native(data, errors='surrogate_or_strict')

    buffer = StringIO(text) if PY3 else BytesIO(text)

    return csv.DictReader(buffer, fieldnames=fieldnames, dialect=dialect)
|
@ -0,0 +1,190 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
|
||||
|
||||
# Input patterns for is_input_dangerous function:
|
||||
#
|
||||
# 1. '"' in string and '--' in string or
|
||||
# "'" in string and '--' in string
|
||||
PATTERN_1 = re.compile(r'(\'|\").*--')
|
||||
|
||||
# 2. union \ intersect \ except + select
|
||||
PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)
|
||||
|
||||
# 3. ';' and any KEY_WORDS
|
||||
PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)
|
||||
|
||||
|
||||
class SQLParseError(Exception):
    """Base error raised when an SQL identifier cannot be parsed or quoted."""
    pass
|
||||
|
||||
|
||||
class UnclosedQuoteError(SQLParseError):
    """Raised when a quoted identifier is missing its closing quote."""
    pass
|
||||
|
||||
|
||||
# maps a type of identifier to the maximum number of dot levels that are
|
||||
# allowed to specify that identifier. For example, a database column can be
|
||||
# specified by up to 4 levels: database.schema.table.column
|
||||
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
|
||||
database=1,
|
||||
schema=2,
|
||||
table=3,
|
||||
column=4,
|
||||
role=1,
|
||||
tablespace=1,
|
||||
sequence=3,
|
||||
publication=1,
|
||||
)
|
||||
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
|
||||
|
||||
|
||||
def _find_end_quote(identifier, quote_char):
|
||||
accumulate = 0
|
||||
while True:
|
||||
try:
|
||||
quote = identifier.index(quote_char)
|
||||
except ValueError:
|
||||
raise UnclosedQuoteError
|
||||
accumulate = accumulate + quote
|
||||
try:
|
||||
next_char = identifier[quote + 1]
|
||||
except IndexError:
|
||||
return accumulate
|
||||
if next_char == quote_char:
|
||||
try:
|
||||
identifier = identifier[quote + 2:]
|
||||
accumulate = accumulate + 2
|
||||
except IndexError:
|
||||
raise UnclosedQuoteError
|
||||
else:
|
||||
return accumulate
|
||||
|
||||
|
||||
def _identifier_parse(identifier, quote_char):
|
||||
if not identifier:
|
||||
raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
|
||||
|
||||
already_quoted = False
|
||||
if identifier.startswith(quote_char):
|
||||
already_quoted = True
|
||||
try:
|
||||
end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
|
||||
except UnclosedQuoteError:
|
||||
already_quoted = False
|
||||
else:
|
||||
if end_quote < len(identifier) - 1:
|
||||
if identifier[end_quote + 1] == '.':
|
||||
dot = end_quote + 1
|
||||
first_identifier = identifier[:dot]
|
||||
next_identifier = identifier[dot + 1:]
|
||||
further_identifiers = _identifier_parse(next_identifier, quote_char)
|
||||
further_identifiers.insert(0, first_identifier)
|
||||
else:
|
||||
raise SQLParseError('User escaped identifiers must escape extra quotes')
|
||||
else:
|
||||
further_identifiers = [identifier]
|
||||
|
||||
if not already_quoted:
|
||||
try:
|
||||
dot = identifier.index('.')
|
||||
except ValueError:
|
||||
identifier = identifier.replace(quote_char, quote_char * 2)
|
||||
identifier = ''.join((quote_char, identifier, quote_char))
|
||||
further_identifiers = [identifier]
|
||||
else:
|
||||
if dot == 0 or dot >= len(identifier) - 1:
|
||||
identifier = identifier.replace(quote_char, quote_char * 2)
|
||||
identifier = ''.join((quote_char, identifier, quote_char))
|
||||
further_identifiers = [identifier]
|
||||
else:
|
||||
first_identifier = identifier[:dot]
|
||||
next_identifier = identifier[dot + 1:]
|
||||
further_identifiers = _identifier_parse(next_identifier, quote_char)
|
||||
first_identifier = first_identifier.replace(quote_char, quote_char * 2)
|
||||
first_identifier = ''.join((quote_char, first_identifier, quote_char))
|
||||
further_identifiers.insert(0, first_identifier)
|
||||
|
||||
return further_identifiers
|
||||
|
||||
|
||||
def pg_quote_identifier(identifier, id_type):
    """Quote a PostgreSQL identifier, enforcing the dot-depth for *id_type*.

    Raises SQLParseError when the identifier has more dotted levels than
    PostgreSQL allows for that identifier type.
    """
    fragments = _identifier_parse(identifier, quote_char='"')
    max_levels = _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_levels:
        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, max_levels))
    return '.'.join(fragments)
|
||||
|
||||
|
||||
def mysql_quote_identifier(identifier, id_type):
    """Quote a MySQL identifier, enforcing the dot-depth for *id_type*.

    A fragment that quoted a bare '*' is special-cased back to '*' so
    wildcard grants keep working.  Raises SQLParseError on excess dots.
    """
    fragments = _identifier_parse(identifier, quote_char='`')
    max_levels = _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if (len(fragments) - 1) > max_levels:
        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, max_levels))

    return '.'.join('*' if fragment == '`*`' else fragment for fragment in fragments)
|
||||
|
||||
|
||||
def is_input_dangerous(string):
    """Check if the passed string is potentially dangerous.
    Can be used to prevent SQL injections.

    Note: use this function only when you can't use
    psycopg2's cursor.execute method parametrized
    (typically with DDL queries).
    """
    if not string:
        return False

    # PATTERN_1..3 are pre-compiled module-level regexes covering quote+comment,
    # UNION/INTERSECT/EXCEPT SELECT and ';<keyword>' injection shapes.
    return any(pattern.search(string) for pattern in (PATTERN_1, PATTERN_2, PATTERN_3))
|
||||
|
||||
|
||||
def check_input(module, *args):
    """Fail the module when any of *args* looks like a SQL injection attempt.

    Wrapper around is_input_dangerous: strings and lists of strings are
    checked directly, None and booleans are skipped, anything else is
    stringified first.  Calls module.fail_json listing the offending inputs.
    """
    dangerous_elements = []

    def _collect(candidate):
        # Record the element when the shared heuristic flags it.
        if is_input_dangerous(candidate):
            dangerous_elements.append(candidate)

    for elem in args:
        if isinstance(elem, str):
            _collect(elem)
        elif isinstance(elem, list):
            for item in elem:
                _collect(item)
        elif elem is None or isinstance(elem, bool):
            # Nothing to inspect for these values.
            continue
        else:
            _collect(str(elem))

    if dangerous_elements:
        module.fail_json(msg="Passed input '%s' is "
                             "potentially dangerous" % ', '.join(dangerous_elements))
|
@ -0,0 +1,330 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2016 Dimension Data
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
# Authors:
|
||||
# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
|
||||
# - Mark Maglana <mmaglana@gmail.com>
|
||||
# - Adam Friedman <tintoy@tintoy.io>
|
||||
#
|
||||
# Common functionality to be used by various module components
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
from os.path import expanduser
|
||||
from uuid import UUID
|
||||
|
||||
LIBCLOUD_IMP_ERR = None
|
||||
try:
|
||||
from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
|
||||
from libcloud.compute.base import Node, NodeLocation
|
||||
from libcloud.compute.providers import get_driver
|
||||
from libcloud.compute.types import Provider
|
||||
|
||||
import libcloud.security
|
||||
|
||||
HAS_LIBCLOUD = True
|
||||
except ImportError:
|
||||
LIBCLOUD_IMP_ERR = traceback.format_exc()
|
||||
HAS_LIBCLOUD = False
|
||||
|
||||
# MCP 2.x version patten for location (datacenter) names.
|
||||
#
|
||||
# Note that this is not a totally reliable way of determining MCP version.
|
||||
# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
|
||||
# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
|
||||
# by specifying it in the module parameters.
|
||||
MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
|
||||
|
||||
|
||||
class DimensionDataModule(object):
    """
    The base class containing common functionality used by Dimension Data modules for Ansible.
    """

    def __init__(self, module):
        """
        Create a new DimensionDataModule.

        Will fail if Apache libcloud is not present.

        :param module: The underlying Ansible module.
        :type module: AnsibleModule
        """

        self.module = module

        if not HAS_LIBCLOUD:
            self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)

        # Credentials are common to all Dimension Data modules.
        credentials = self.get_credentials()
        self.user_id = credentials['user_id']
        self.key = credentials['key']

        # Region and location are common to all Dimension Data modules.
        # Regions are exposed to the user without the libcloud 'dd-' prefix.
        region = self.module.params['region']
        self.region = 'dd-{0}'.format(region)
        self.location = self.module.params['location']

        # NOTE: this toggles certificate verification globally for libcloud,
        # not just for this driver instance.
        libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']

        self.driver = get_driver(Provider.DIMENSIONDATA)(
            self.user_id,
            self.key,
            region=self.region
        )

        # Determine the MCP API version (this depends on the target datacenter).
        self.mcp_version = self.get_mcp_version(self.location)

        # Optional "wait-for-completion" arguments
        if 'wait' in self.module.params:
            self.wait = self.module.params['wait']
            self.wait_time = self.module.params['wait_time']
            self.wait_poll_interval = self.module.params['wait_poll_interval']
        else:
            # Module was declared without the wait arguments: disable waiting.
            self.wait = False
            self.wait_time = 0
            self.wait_poll_interval = 0

    def get_credentials(self):
        """
        Get user_id and key from module configuration, environment, or dotfile.
        Order of priority is module, environment, dotfile.

        To set in environment:

            export MCP_USER='myusername'
            export MCP_PASSWORD='mypassword'

        To set in dot file place a file at ~/.dimensiondata with
        the following contents:

            [dimensiondatacloud]
            MCP_USER: myusername
            MCP_PASSWORD: mypassword

        :raises MissingCredentialsError: when no source supplies credentials.
        :return: dict with 'user_id' and 'key' entries.
        """

        if not HAS_LIBCLOUD:
            self.module.fail_json(msg='libcloud is required for this module.')

        user_id = None
        key = None

        # First, try the module configuration
        # NOTE(review): 'mcp_user' in params is True whenever the argument is
        # declared in the spec, even if its value is None — the env/dotfile
        # fallbacks below then kick in; confirm this is the intended flow.
        if 'mcp_user' in self.module.params:
            if 'mcp_password' not in self.module.params:
                self.module.fail_json(
                    msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
                )

            user_id = self.module.params['mcp_user']
            key = self.module.params['mcp_password']

        # Fall back to environment
        if not user_id or not key:
            user_id = os.environ.get('MCP_USER', None)
            key = os.environ.get('MCP_PASSWORD', None)

        # Finally, try dotfile (~/.dimensiondata)
        if not user_id or not key:
            home = expanduser('~')
            config = configparser.RawConfigParser()
            config.read("%s/.dimensiondata" % home)

            try:
                user_id = config.get("dimensiondatacloud", "MCP_USER")
                key = config.get("dimensiondatacloud", "MCP_PASSWORD")
            except (configparser.NoSectionError, configparser.NoOptionError):
                # Dotfile missing or incomplete — handled by the checks below.
                pass

        # One or more credentials not found. Function can't recover from this
        # so it has to raise an error instead of fail silently.
        if not user_id:
            raise MissingCredentialsError("Dimension Data user id not found")
        elif not key:
            raise MissingCredentialsError("Dimension Data key not found")

        # Both found, return data
        return dict(user_id=user_id, key=key)

    def get_mcp_version(self, location):
        """
        Get the MCP version for the specified location.

        Matches the location (datacenter) name against a "MCP 2" pattern;
        this is a heuristic — libcloud's NodeLocation exposes no explicit
        version field (see MCP_2_LOCATION_NAME_PATTERN above).

        :return: '2.0' or '1.0'.
        """

        location = self.driver.ex_get_location_by_id(location)
        if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
            return '2.0'

        return '1.0'

    def get_network_domain(self, locator, location):
        """
        Retrieve a network domain by its name or Id.

        :param locator: UUID or display name of the network domain.
        :param location: Datacenter to search when matching by name.
        :raises UnknownNetworkError: when no matching domain exists.
        """

        if is_uuid(locator):
            network_domain = self.driver.ex_get_network_domain(locator)
        else:
            # Name lookup: first domain in the location with a matching name.
            matching_network_domains = [
                network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
                if network_domain.name == locator
            ]

            if matching_network_domains:
                network_domain = matching_network_domains[0]
            else:
                network_domain = None

        if network_domain:
            return network_domain

        raise UnknownNetworkError("Network '%s' could not be found" % locator)

    def get_vlan(self, locator, location, network_domain):
        """
        Get a VLAN object by its name or id

        :param locator: UUID or display name of the VLAN.
        :param location: Datacenter to search when matching by name.
        :param network_domain: Network domain to search within.
        :raises UnknownVLANError: when no matching VLAN exists.
        """
        if is_uuid(locator):
            vlan = self.driver.ex_get_vlan(locator)
        else:
            # Name lookup: first VLAN in the domain with a matching name.
            matching_vlans = [
                vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
                if vlan.name == locator
            ]

            if matching_vlans:
                vlan = matching_vlans[0]
            else:
                vlan = None

        if vlan:
            return vlan

        raise UnknownVLANError("VLAN '%s' could not be found" % locator)

    @staticmethod
    def argument_spec(**additional_argument_spec):
        """
        Build an argument specification for a Dimension Data module.
        :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
        :return: A dict containing the argument specification.
        """

        spec = dict(
            region=dict(type='str', default='na'),
            mcp_user=dict(type='str', required=False),
            mcp_password=dict(type='str', required=False, no_log=True),
            location=dict(type='str', required=True),
            validate_certs=dict(type='bool', required=False, default=True)
        )

        if additional_argument_spec:
            spec.update(additional_argument_spec)

        return spec

    @staticmethod
    def argument_spec_with_wait(**additional_argument_spec):
        """
        Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
        :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
        :return: A dict containing the argument specification.
        """

        spec = DimensionDataModule.argument_spec(
            wait=dict(type='bool', required=False, default=False),
            wait_time=dict(type='int', required=False, default=600),
            wait_poll_interval=dict(type='int', required=False, default=2)
        )

        if additional_argument_spec:
            spec.update(additional_argument_spec)

        return spec

    @staticmethod
    def required_together(*additional_required_together):
        """
        Get the basic argument specification for Dimension Data modules indicating which arguments are must be specified together.
        :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
        :return: An array containing the argument specifications.
        """

        required_together = [
            ['mcp_user', 'mcp_password']
        ]

        if additional_required_together:
            required_together.extend(additional_required_together)

        return required_together
|
||||
|
||||
|
||||
class LibcloudNotFound(Exception):
    """Raised when Apache libcloud cannot be imported."""

    pass
|
||||
|
||||
|
||||
class MissingCredentialsError(Exception):
    """Raised when no Dimension Data CloudControl credentials can be located."""

    pass
|
||||
|
||||
|
||||
class UnknownNetworkError(Exception):
    """Raised when a network or network domain lookup finds no match."""

    pass
|
||||
|
||||
|
||||
class UnknownVLANError(Exception):
    """Raised when a VLAN lookup finds no match."""

    pass
|
||||
|
||||
|
||||
def get_dd_regions():
    """Return the region codes whose vendor is Dimension Data.

    Filters libcloud's API_ENDPOINTS for the 'dd-' prefixed entries and
    strips that prefix, yielding the user-facing region names.
    """
    return [endpoint[3:] for endpoint in API_ENDPOINTS.keys() if endpoint.startswith('dd-')]
|
||||
|
||||
|
||||
def is_uuid(u, version=4):
    """Test whether *u* is the canonical string form of a UUID.

    :param u: Candidate value (expected to be a string).
    :param version: UUID version to validate against (default 4).
    :return: True only when *u* parses as a UUID of the given version and
        round-trips unchanged (i.e. it is already canonical lowercase form).
    """
    try:
        return str(UUID(u, version=version)) == u
    except (ValueError, AttributeError, TypeError):
        # ValueError: malformed UUID string.
        # TypeError/AttributeError: *u* is not a string at all (e.g. None) —
        # previously these propagated instead of answering "not a UUID".
        return False
|
@ -0,0 +1,234 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
class GandiLiveDNSAPI(object):
|
||||
|
||||
api_endpoint = 'https://api.gandi.net/v5/livedns'
|
||||
changed = False
|
||||
|
||||
error_strings = {
|
||||
400: 'Bad request',
|
||||
401: 'Permission denied',
|
||||
404: 'Resource not found',
|
||||
}
|
||||
|
||||
attribute_map = {
|
||||
'record': 'rrset_name',
|
||||
'type': 'rrset_type',
|
||||
'ttl': 'rrset_ttl',
|
||||
'values': 'rrset_values'
|
||||
}
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.api_key = module.params['api_key']
|
||||
|
||||
def _build_error_message(self, module, info):
|
||||
s = ''
|
||||
body = info.get('body')
|
||||
if body:
|
||||
errors = module.from_json(body).get('errors')
|
||||
if errors:
|
||||
error = errors[0]
|
||||
name = error.get('name')
|
||||
if name:
|
||||
s += '{0} :'.format(name)
|
||||
description = error.get('description')
|
||||
if description:
|
||||
s += description
|
||||
return s
|
||||
|
||||
def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True):
|
||||
headers = {'Authorization': 'Apikey {0}'.format(self.api_key),
|
||||
'Content-Type': 'application/json'}
|
||||
data = None
|
||||
if payload:
|
||||
try:
|
||||
data = json.dumps(payload)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
|
||||
|
||||
resp, info = fetch_url(self.module,
|
||||
self.api_endpoint + api_call,
|
||||
headers=headers,
|
||||
data=data,
|
||||
method=method)
|
||||
|
||||
error_msg = ''
|
||||
if info['status'] >= 400 and (info['status'] != 404 or error_on_404):
|
||||
err_s = self.error_strings.get(info['status'], '')
|
||||
|
||||
error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info))
|
||||
|
||||
result = None
|
||||
try:
|
||||
content = resp.read()
|
||||
except AttributeError:
|
||||
content = None
|
||||
|
||||
if content:
|
||||
try:
|
||||
result = json.loads(to_text(content, errors='surrogate_or_strict'))
|
||||
except (getattr(json, 'JSONDecodeError', ValueError)) as e:
|
||||
error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
|
||||
|
||||
if error_msg:
|
||||
self.module.fail_json(msg=error_msg)
|
||||
|
||||
return result, info['status']
|
||||
|
||||
def build_result(self, result, domain):
|
||||
if result is None:
|
||||
return None
|
||||
|
||||
res = {}
|
||||
for k in self.attribute_map:
|
||||
v = result.get(self.attribute_map[k], None)
|
||||
if v is not None:
|
||||
if k == 'record' and v == '@':
|
||||
v = ''
|
||||
res[k] = v
|
||||
|
||||
res['domain'] = domain
|
||||
|
||||
return res
|
||||
|
||||
def build_results(self, results, domain):
|
||||
if results is None:
|
||||
return []
|
||||
return [self.build_result(r, domain) for r in results]
|
||||
|
||||
def get_records(self, record, type, domain):
|
||||
url = '/domains/%s/records' % (domain)
|
||||
if record:
|
||||
url += '/%s' % (record)
|
||||
if type:
|
||||
url += '/%s' % (type)
|
||||
|
||||
records, status = self._gandi_api_call(url, error_on_404=False)
|
||||
|
||||
if status == 404:
|
||||
return []
|
||||
|
||||
if not isinstance(records, list):
|
||||
records = [records]
|
||||
|
||||
# filter by type if record is not set
|
||||
if not record and type:
|
||||
records = [r
|
||||
for r in records
|
||||
if r['rrset_type'] == type]
|
||||
|
||||
return records
|
||||
|
||||
def create_record(self, record, type, values, ttl, domain):
|
||||
url = '/domains/%s/records' % (domain)
|
||||
new_record = {
|
||||
'rrset_name': record,
|
||||
'rrset_type': type,
|
||||
'rrset_values': values,
|
||||
'rrset_ttl': ttl,
|
||||
}
|
||||
record, status = self._gandi_api_call(url, method='POST', payload=new_record)
|
||||
|
||||
if status in (200, 201,):
|
||||
return new_record
|
||||
|
||||
return None
|
||||
|
||||
def update_record(self, record, type, values, ttl, domain):
|
||||
url = '/domains/%s/records/%s/%s' % (domain, record, type)
|
||||
new_record = {
|
||||
'rrset_values': values,
|
||||
'rrset_ttl': ttl,
|
||||
}
|
||||
record = self._gandi_api_call(url, method='PUT', payload=new_record)[0]
|
||||
return record
|
||||
|
||||
def delete_record(self, record, type, domain):
|
||||
url = '/domains/%s/records/%s/%s' % (domain, record, type)
|
||||
|
||||
self._gandi_api_call(url, method='DELETE')
|
||||
|
||||
def delete_dns_record(self, record, type, values, domain):
|
||||
if record == '':
|
||||
record = '@'
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
|
||||
if records:
|
||||
cur_record = records[0]
|
||||
|
||||
self.changed = True
|
||||
|
||||
if values is not None and set(cur_record['rrset_values']) != set(values):
|
||||
new_values = set(cur_record['rrset_values']) - set(values)
|
||||
if new_values:
|
||||
# Removing one or more values from a record, we update the record with the remaining values
|
||||
self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain)
|
||||
records = self.get_records(record, type, domain)
|
||||
return records[0], self.changed
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.delete_record(record, type, domain)
|
||||
else:
|
||||
cur_record = None
|
||||
|
||||
return None, self.changed
|
||||
|
||||
def ensure_dns_record(self, record, type, ttl, values, domain):
    """Create or update a record set so it matches *ttl* and *values*.

    Returns a (record_dict, changed) tuple.  Honors check mode by
    reporting the would-be record without touching the API.
    """
    if record == '':
        record = '@'

    records = self.get_records(record, type, domain)

    if records:
        cur_record = records[0]

        ttl_differs = ttl is not None and cur_record['rrset_ttl'] != ttl
        values_differ = values is not None and set(cur_record['rrset_values']) != set(values)

        if not (ttl_differs or values_differ):
            # Already in the desired state — nothing to do.
            return cur_record, self.changed

        if self.module.check_mode:
            result = dict(
                rrset_type=type,
                rrset_name=record,
                rrset_values=values,
                rrset_ttl=ttl
            )
        else:
            self.update_record(record, type, values, ttl, domain)
            records = self.get_records(record, type, domain)
            result = records[0]
        self.changed = True
        return result, self.changed

    # No existing record set: create one (or pretend to in check mode).
    if self.module.check_mode:
        result = dict(
            rrset_type=type,
            rrset_name=record,
            rrset_values=values,
            rrset_ttl=ttl
        )
    else:
        result = self.create_record(record, type, values, ttl, domain)

    self.changed = True
    return result, self.changed
|
@ -0,0 +1,104 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
|
||||
# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from urllib import quote_plus # Python 2.X
|
||||
from urlparse import urljoin
|
||||
except ImportError:
|
||||
from urllib.parse import quote_plus, urljoin # Python 3+
|
||||
|
||||
import traceback
|
||||
|
||||
GITLAB_IMP_ERR = None
|
||||
try:
|
||||
import gitlab
|
||||
import requests
|
||||
HAS_GITLAB_PACKAGE = True
|
||||
except Exception:
|
||||
GITLAB_IMP_ERR = traceback.format_exc()
|
||||
HAS_GITLAB_PACKAGE = False
|
||||
|
||||
|
||||
def auth_argument_spec(spec=None):
    """Return the shared GitLab authentication argument spec.

    Optionally merged with the entries of *spec*, which win on key
    clashes.
    """
    merged = {
        'api_token': dict(type='str', no_log=True),
        'api_oauth_token': dict(type='str', no_log=True),
        'api_job_token': dict(type='str', no_log=True),
    }
    if spec:
        merged.update(spec)
    return merged
|
||||
|
||||
|
||||
def find_project(gitlab_instance, identifier):
    """Look up a GitLab project by id or full path.

    Falls back to ``<current user>/<identifier>`` when the direct
    lookup fails (covers modules that receive a bare project name).
    Returns the project object, or None when neither lookup succeeds.
    """
    try:
        project = gitlab_instance.projects.get(identifier)
    except Exception:
        # The gitlab library raises various errors here; any failure
        # triggers the user-namespace fallback.  (Removed the unused
        # `as e` bindings from both handlers.)
        current_user = gitlab_instance.user
        try:
            project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
        except Exception:
            return None

    return project
|
||||
|
||||
|
||||
def find_group(gitlab_instance, identifier):
    """Look up a GitLab group by id or full path.

    Returns the group object, or None when the lookup fails for any
    reason (missing group, no permission, ...).
    """
    try:
        # Renamed the misleading local `project` -> `group` and removed
        # the unused `as e` binding.
        group = gitlab_instance.groups.get(identifier)
    except Exception:
        return None

    return group
|
||||
|
||||
|
||||
def gitlab_authentication(module):
    """Build an authenticated python-gitlab client from module params.

    Supports a private token, an OAuth token, a CI job token, or a
    username/password pair (handled natively by python-gitlab < 1.13.0,
    and via the OAuth password flow on newer releases).  Any connection
    failure results in module.fail_json.
    """
    gitlab_url = module.params['api_url']
    validate_certs = module.params['validate_certs']
    gitlab_user = module.params['api_username']
    gitlab_password = module.params['api_password']
    gitlab_token = module.params['api_token']
    gitlab_oauth_token = module.params['api_oauth_token']
    gitlab_job_token = module.params['api_job_token']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)

    try:
        # python-gitlab library remove support for username/password authentication since 1.13.0
        # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
        # This condition allow to still support older version of the python-gitlab library
        if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
            gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
                                            private_token=gitlab_token, api_version=4)
        else:
            if gitlab_user:
                # We can create an oauth_token using a username and password
                # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
                data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
                resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
                gitlab_oauth_token = resp.json()["access_token"]

            gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
                                            oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)

        gitlab_instance.auth()
    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
    except (gitlab.exceptions.GitlabHttpError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s. \
GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))

    return gitlab_instance
|
@ -0,0 +1,42 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import env_fallback, missing_required_lib
|
||||
|
||||
HAS_HEROKU = False
|
||||
HEROKU_IMP_ERR = None
|
||||
try:
|
||||
import heroku3
|
||||
HAS_HEROKU = True
|
||||
except ImportError:
|
||||
HEROKU_IMP_ERR = traceback.format_exc()
|
||||
|
||||
|
||||
class HerokuHelper():
    """Shared plumbing for Heroku modules: argument spec, library check
    and construction of an authenticated heroku3 client."""

    def __init__(self, module):
        self.module = module
        # Fail fast if heroku3 is missing before touching any params.
        self.check_lib()
        self.api_key = module.params["api_key"]

    def check_lib(self):
        # HAS_HEROKU / HEROKU_IMP_ERR are set by the import guard above.
        if not HAS_HEROKU:
            self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)

    @staticmethod
    def heroku_argument_spec():
        """Argument spec fragment shared by all Heroku modules."""
        return dict(
            api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))

    def get_heroku_client(self):
        """Return an authenticated heroku3 client, or fail the module."""
        client = heroku3.from_key(self.api_key)
        if not client.is_authenticated:
            self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
        return client
|
@ -0,0 +1,442 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c), Google Inc, 2017
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or
|
||||
# https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
import time
|
||||
import traceback
|
||||
|
||||
THIRD_LIBRARIES_IMP_ERR = None
|
||||
try:
|
||||
from keystoneauth1.adapter import Adapter
|
||||
from keystoneauth1.identity import v3
|
||||
from keystoneauth1 import session
|
||||
HAS_THIRD_LIBRARIES = True
|
||||
except ImportError:
|
||||
THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
|
||||
HAS_THIRD_LIBRARIES = False
|
||||
|
||||
from ansible.module_utils.basic import (AnsibleModule, env_fallback,
|
||||
missing_required_lib)
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
|
||||
class HwcModuleException(Exception):
    """Module-level error raised by the Huawei Cloud helpers.

    NOTE: str() deliberately uses the '[HwcClientException]' tag for
    backward compatibility with existing log/message matching.
    """

    def __init__(self, message):
        super(HwcModuleException, self).__init__()
        self._message = message

    def __str__(self):
        return "[HwcClientException] message=%s" % self._message
|
||||
|
||||
|
||||
class HwcClientException(Exception):
    """Error raised for failed HTTP exchanges with the Huawei Cloud API."""

    def __init__(self, code, message):
        super(HwcClientException, self).__init__()
        # A code of 0 means "no HTTP status available" and is omitted
        # from the string form.
        self._code = code
        self._message = message

    def __str__(self):
        prefix = " code=%s," % str(self._code) if self._code != 0 else ""
        return "[HwcClientException]%s message=%s" % (prefix, self._message)
|
||||
|
||||
|
||||
class HwcClientException404(HwcClientException):
    """HTTP 404 specialisation of HwcClientException."""

    def __init__(self, message):
        # The code is fixed at 404 for this subclass.
        super(HwcClientException404, self).__init__(404, message)

    def __str__(self):
        return "[HwcClientException404] message=%s" % self._message
|
||||
|
||||
|
||||
def session_method_wrapper(f):
    """Decorate a _ServiceClient verb method.

    Prefixes the client endpoint onto the request path, parses the JSON
    body (when any) and converts transport or HTTP errors into
    HwcClientException / HwcClientException404.
    """
    def _wrap(self, url, *args, **kwargs):
        try:
            full_url = self.endpoint + url
            resp = f(self, full_url, *args, **kwargs)
        except Exception as ex:
            raise HwcClientException(
                0, "Sending request failed, error=%s" % ex)

        result = None
        if resp.content:
            try:
                result = resp.json()
            except Exception as ex:
                raise HwcClientException(
                    0, "Parsing response to json failed, error: %s" % ex)

        code = resp.status_code
        if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
            msg = ""
            # NOTE(review): navigate_value expects a list of keys; a plain
            # string here is iterated character by character — confirm
            # this is intended.
            for path in ['message', 'error.message']:
                try:
                    msg = navigate_value(result, path)
                    break
                except Exception:
                    pass
            else:
                msg = str(result)

            if code == 404:
                raise HwcClientException404(msg)
            raise HwcClientException(code, msg)

        return result

    return _wrap
|
||||
|
||||
|
||||
class _ServiceClient(object):
    """HTTP helper bound to one service endpoint.

    Wraps a keystoneauth adapter so that every verb method prefixes the
    endpoint, applies default headers and normalizes errors (via
    session_method_wrapper).
    """

    def __init__(self, client, endpoint, product):
        self._client = client
        self._endpoint = endpoint
        self._default_header = {
            'User-Agent': "Huawei-Ansible-MM-%s" % product,
            'Accept': 'application/json',
        }

    @property
    def endpoint(self):
        return self._endpoint

    @endpoint.setter
    def endpoint(self, e):
        self._endpoint = e

    @session_method_wrapper
    def get(self, url, body=None, header=None, timeout=None):
        return self._client.get(url, json=body, timeout=timeout,
                                headers=self._header(header))

    @session_method_wrapper
    def post(self, url, body=None, header=None, timeout=None):
        return self._client.post(url, json=body, timeout=timeout,
                                 headers=self._header(header))

    @session_method_wrapper
    def delete(self, url, body=None, header=None, timeout=None):
        return self._client.delete(url, json=body, timeout=timeout,
                                   headers=self._header(header))

    @session_method_wrapper
    def put(self, url, body=None, header=None, timeout=None):
        return self._client.put(url, json=body, timeout=timeout,
                                headers=self._header(header))

    def _header(self, header):
        """Merge default headers into *header*; the caller's entries win."""
        if header and isinstance(header, dict):
            for k, v in self._default_header.items():
                header.setdefault(k, v)
        else:
            header = self._default_header
        return header
|
||||
|
||||
|
||||
class Config(object):
    """Holds authenticated keystone sessions and resolves per-service
    endpoints for Huawei Cloud modules."""

    def __init__(self, module, product):
        self._project_client = None
        self._domain_client = None
        self._module = module
        self._product = product
        self._endpoints = {}  # "<service>.<region>" -> endpoint URL cache

        self._validate()
        self._gen_provider_client()

    @property
    def module(self):
        return self._module

    def client(self, region, service_type, service_level):
        """Return a _ServiceClient for *service_type* in *region*.

        A service_level of "domain" selects the domain-scoped session;
        anything else uses the project-scoped one.
        """
        adapter = self._domain_client if service_level == "domain" else self._project_client
        endpoint = self._get_service_endpoint(adapter, service_type, region)
        return _ServiceClient(adapter, endpoint, self._product)

    def _gen_provider_client(self):
        """Build the project- and domain-scoped keystone adapters."""
        params = self._module.params
        auth_kwargs = {
            "auth_url": params['identity_endpoint'],
            "password": params['password'],
            "username": params['user'],
            "project_name": params['project'],
            "user_domain_name": params['domain'],
            "reauthenticate": True
        }

        self._project_client = Adapter(
            session.Session(auth=v3.Password(**auth_kwargs)),
            raise_exc=False)

        # The domain-scoped session is identical except that it is not
        # bound to a project.
        auth_kwargs.pop("project_name")
        self._domain_client = Adapter(
            session.Session(auth=v3.Password(**auth_kwargs)),
            raise_exc=False)

    def _get_service_endpoint(self, client, service_type, region):
        """Resolve (and cache) the public endpoint of *service_type*."""
        cache_key = "%s.%s" % (service_type, region if region else "")
        if cache_key in self._endpoints:
            return self._endpoints.get(cache_key)

        try:
            url = client.get_endpoint(service_type=service_type,
                                      region_name=region, interface="public")
        except Exception as ex:
            raise HwcClientException(
                0, "Getting endpoint failed, error=%s" % ex)

        if url == "":
            # NOTE(review): the "enpoint" typo is kept byte-for-byte —
            # callers or logs may match on this message.
            raise HwcClientException(
                0, "Can not find the enpoint for %s" % service_type)

        if url[-1] != "/":
            url += "/"

        self._endpoints[cache_key] = url
        return url

    def _validate(self):
        # Bail out early with the standard "missing library" error when
        # keystoneauth1 failed to import at module load time.
        if not HAS_THIRD_LIBRARIES:
            self.module.fail_json(
                msg=missing_required_lib('keystoneauth1'),
                exception=THIRD_LIBRARIES_IMP_ERR)
|
||||
|
||||
|
||||
class HwcModule(AnsibleModule):
    """AnsibleModule pre-loaded with the Huawei Cloud connection
    arguments.  Every argument can also be supplied through an
    ANSIBLE_HWC_* environment variable via env_fallback."""

    def __init__(self, *args, **kwargs):
        spec = kwargs.setdefault('argument_spec', {})

        def _hwc_arg(env, required=True, no_log=False):
            # All connection arguments share the same shape; only the
            # fallback env var and a couple of flags differ.
            d = dict(type='str', fallback=(env_fallback, [env]))
            if required:
                d['required'] = True
            if no_log:
                d['no_log'] = True
            return d

        spec.update(dict(
            identity_endpoint=_hwc_arg('ANSIBLE_HWC_IDENTITY_ENDPOINT'),
            user=_hwc_arg('ANSIBLE_HWC_USER'),
            password=_hwc_arg('ANSIBLE_HWC_PASSWORD', no_log=True),
            domain=_hwc_arg('ANSIBLE_HWC_DOMAIN'),
            project=_hwc_arg('ANSIBLE_HWC_PROJECT'),
            region=_hwc_arg('ANSIBLE_HWC_REGION', required=False),
            id=dict(type='str'),
        ))

        super(HwcModule, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class _DictComparison(object):
|
||||
''' This class takes in two dictionaries `a` and `b`.
|
||||
These are dictionaries of arbitrary depth, but made up of standard
|
||||
Python types only.
|
||||
This differ will compare all values in `a` to those in `b`.
|
||||
If value in `a` is None, always returns True, indicating
|
||||
this value is no need to compare.
|
||||
Note: On all lists, order does matter.
|
||||
'''
|
||||
|
||||
def __init__(self, request):
|
||||
self.request = request
|
||||
|
||||
def __eq__(self, other):
|
||||
return self._compare_dicts(self.request, other.request)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def _compare_dicts(self, dict1, dict2):
|
||||
if dict1 is None:
|
||||
return True
|
||||
|
||||
if set(dict1.keys()) != set(dict2.keys()):
|
||||
return False
|
||||
|
||||
for k in dict1:
|
||||
if not self._compare_value(dict1.get(k), dict2.get(k)):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _compare_lists(self, list1, list2):
|
||||
"""Takes in two lists and compares them."""
|
||||
if list1 is None:
|
||||
return True
|
||||
|
||||
if len(list1) != len(list2):
|
||||
return False
|
||||
|
||||
for i in range(len(list1)):
|
||||
if not self._compare_value(list1[i], list2[i]):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _compare_value(self, value1, value2):
|
||||
"""
|
||||
return: True: value1 is same as value2, otherwise False.
|
||||
"""
|
||||
if value1 is None:
|
||||
return True
|
||||
|
||||
if not (value1 and value2):
|
||||
return (not value1) and (not value2)
|
||||
|
||||
# Can assume non-None types at this point.
|
||||
if isinstance(value1, list) and isinstance(value2, list):
|
||||
return self._compare_lists(value1, value2)
|
||||
|
||||
elif isinstance(value1, dict) and isinstance(value2, dict):
|
||||
return self._compare_dicts(value1, value2)
|
||||
|
||||
# Always use to_text values to avoid unicode issues.
|
||||
return (to_text(value1, errors='surrogate_or_strict') == to_text(
|
||||
value2, errors='surrogate_or_strict'))
|
||||
|
||||
|
||||
def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
    """Poll *refresh* until its status lands in *target*.

    refresh() must return an (object, status) tuple.  Statuses in
    *pending* are tolerated while waiting; anything else raises.  The
    poll interval backs off exponentially between min_interval and 10
    seconds.  Raises HwcModuleException on timeout, after too many
    consecutive not-found results, or on an unexpected status.
    """
    timed_out = False
    misses = 0
    interval = 0

    time.sleep(delay)

    deadline = time.time() + timeout
    while not timed_out:
        if time.time() > deadline:
            # Allow one last refresh before declaring a timeout.
            timed_out = True

        obj, status = refresh()

        if obj is None:
            misses += 1
            if misses > 10:
                raise HwcModuleException(
                    "not found the object for %d times" % misses)
        else:
            misses = 0

            if status in target:
                return obj

            if pending and status not in pending:
                raise HwcModuleException(
                    "unexpect status(%s) occured" % status)

        if not timed_out:
            # Exponential back-off, clamped to [min_interval, 10].
            interval *= 2
            if interval < min_interval:
                interval = min_interval
            elif interval > 10:
                interval = 10

            time.sleep(interval)

    raise HwcModuleException("asycn wait timeout after %d seconds" % timeout)
|
||||
|
||||
|
||||
def navigate_value(data, index, array_index=None):
    """Walk *data* along the key path *index* and return the leaf value.

    *array_index* optionally maps dotted key paths (e.g. "a.b") to a
    list position to descend into at that point of the walk.  A None
    encountered along the way short-circuits to None; structural
    mismatches raise HwcModuleException.
    """
    if array_index and (not isinstance(array_index, dict)):
        raise HwcModuleException("array_index must be dict")

    current = data
    for depth, key in enumerate(index):
        if current is None:
            return None

        if not isinstance(current, dict):
            raise HwcModuleException(
                "can't navigate value from a non-dict object")

        if key not in current:
            raise HwcModuleException(
                "navigate value failed: key(%s) is not exist in dict" % key)
        current = current[key]

        if not array_index:
            continue

        path = ".".join(index[: (depth + 1)])
        if path not in array_index:
            continue

        if current is None:
            return None

        if not isinstance(current, list):
            raise HwcModuleException(
                "can't navigate value from a non-list object")

        pos = array_index.get(path)
        if pos >= len(current):
            raise HwcModuleException(
                "navigate value failed: the index is out of list")
        current = current[pos]

    return current
|
||||
|
||||
|
||||
def build_path(module, path, kv=None):
    """Fill the ``{placeholder}`` slots in *path*.

    Values come from *kv* first, then from module.params; unknown
    placeholders are replaced with the empty string.  All values are
    stringified.
    """
    if kv is None:
        kv = dict()

    substitutions = {}
    for placeholder in re.findall(r"{[^/]*}", path):
        name = placeholder[1:-1]
        if name in kv:
            substitutions[name] = str(kv[name])
        elif name in module.params:
            substitutions[name] = str(module.params.get(name))
        else:
            substitutions[name] = ""

    return path.format(**substitutions)
|
||||
|
||||
|
||||
def get_region(module):
    """Return the region: the explicit parameter when set, otherwise the
    prefix of the project name (projects are named "<region>_<suffix>")."""
    region = module.params['region']
    if region:
        return region
    return module.params['project'].split("_")[0]
|
||||
|
||||
|
||||
def is_empty_value(v):
    """Return True for falsy values (None, '', 0, empty containers)."""
    return not v
|
||||
|
||||
|
||||
def are_different_dicts(dict1, dict2):
    """Return True when the two request dicts differ under the
    None-tolerant comparison implemented by _DictComparison."""
    return not (_DictComparison(dict1) == _DictComparison(dict2))
|
@ -0,0 +1,95 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2018 IBM CORPORATION
|
||||
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
from functools import wraps
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
|
||||
PYXCLI_INSTALLED = True
|
||||
PYXCLI_IMP_ERR = None
|
||||
try:
|
||||
from pyxcli import client, errors
|
||||
except ImportError:
|
||||
PYXCLI_IMP_ERR = traceback.format_exc()
|
||||
PYXCLI_INSTALLED = False
|
||||
|
||||
AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
|
||||
'domain', 'perf_class', 'vol',
|
||||
'iscsi_chap_name', 'iscsi_chap_secret',
|
||||
'cluster', 'host', 'lun', 'override',
|
||||
'fcaddress', 'iscsi_name', 'max_dms',
|
||||
'max_cgs', 'ldap_id', 'max_mirrors',
|
||||
'max_pools', 'max_volumes', 'hard_capacity',
|
||||
'soft_capacity']
|
||||
|
||||
|
||||
def xcli_wrapper(func):
    """Decorator: convert pyxcli command failures into module.fail_json
    with a readable message."""
    @wraps(func)
    def _guarded(module, *args, **kwargs):
        try:
            return func(module, *args, **kwargs)
        except errors.CommandExecutionError as e:
            module.fail_json(msg=to_native(e))
    return _guarded
|
||||
|
||||
|
||||
@xcli_wrapper
|
||||
def connect_ssl(module):
|
||||
endpoints = module.params['endpoints']
|
||||
username = module.params['username']
|
||||
password = module.params['password']
|
||||
if not (username and password and endpoints):
|
||||
module.fail_json(
|
||||
msg="Username, password or endpoints arguments "
|
||||
"are missing from the module arguments")
|
||||
|
||||
try:
|
||||
return client.XCLIClient.connect_multiendpoint_ssl(username,
|
||||
password,
|
||||
endpoints)
|
||||
except errors.CommandFailedConnectionError as e:
|
||||
module.fail_json(
|
||||
msg="Connection with Spectrum Accelerate system has "
|
||||
"failed: {[0]}.".format(to_native(e)))
|
||||
|
||||
|
||||
def spectrum_accelerate_spec():
    """ Return arguments spec for AnsibleModule """
    return {
        'endpoints': dict(required=True),
        'username': dict(required=True),
        'password': dict(no_log=True, required=True),
    }
|
||||
|
||||
|
||||
@xcli_wrapper
|
||||
def execute_pyxcli_command(module, xcli_command, xcli_client):
|
||||
pyxcli_args = build_pyxcli_command(module.params)
|
||||
getattr(xcli_client.cmd, xcli_command)(**(pyxcli_args))
|
||||
return True
|
||||
|
||||
|
||||
def build_pyxcli_command(fields):
    """Build the keyword arguments for pyxcli from the module params.

    Only truthy values whose name appears in AVAILABLE_PYXCLI_FIELDS
    are forwarded.
    """
    pyxcli_args = {}
    for name, value in fields.items():
        # Falsy values (None, '', 0, ...) are treated as "not provided".
        if not value:
            continue
        # The original also checked `value != ''` here, but empty
        # strings are already filtered by the falsiness check above —
        # that condition was dead code and has been removed.
        if name in AVAILABLE_PYXCLI_FIELDS:
            pyxcli_args[name] = value
    return pyxcli_args
|
||||
|
||||
|
||||
def is_pyxcli_installed(module):
    """Fail the module when the pyxcli library could not be imported."""
    # PYXCLI_INSTALLED / PYXCLI_IMP_ERR are set by the import guard above.
    if not PYXCLI_INSTALLED:
        module.fail_json(msg=missing_required_lib('pyxcli'),
                         exception=PYXCLI_IMP_ERR)
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,232 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
|
||||
|
||||
class iLORedfishUtils(RedfishUtils):
    """HPE iLO-specific extensions to RedfishUtils: session listing and
    NTP / time-zone / DNS / domain-name / WINS management through the
    iLO Redfish API."""

    def get_ilo_sessions(self):
        """Return details of all iLO sessions except the caller's own.

        Result dict: {'ret': True, 'msg': [<session dict>, ...]} on
        success, or the failing get_request response.
        """
        result = {}
        # listing all users has always been slower than other operations, why?
        session_list = []
        sessions_results = []
        # Get these entries, but does not fail if not found
        properties = ['Description', 'Id', 'Name', 'UserName']

        # Changed self.sessions_uri to Hardcoded string.
        response = self.get_request(
            self.root_uri + self.service_root + "SessionService/Sessions/")
        if not response['ret']:
            return response
        result['ret'] = True
        data = response['data']

        # Bug fix: current_session was referenced below even when the
        # Oem block is absent, raising NameError.  Default to None so
        # that in that case no session is skipped.
        current_session = None
        if 'Oem' in data:
            if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
                current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]

        for member in data[u'Members']:
            # session_list[] are URIs
            session_list.append(member[u'@odata.id'])

        # For each session URI (other than our own), fetch its details.
        for uri in session_list:
            session = {}
            if uri != current_session:
                response = self.get_request(self.root_uri + uri)
                if not response['ret']:
                    return response
                data = response['data']
                # Loop var renamed from 'property' to avoid shadowing the builtin.
                for prop in properties:
                    if prop in data:
                        session[prop] = data[prop]
                sessions_results.append(session)
        result["msg"] = sessions_results
        result["ret"] = True
        return result

    def set_ntp_server(self, mgr_attributes):
        """Append an NTP server to the manager's static list, disabling
        DHCP-provided NTP on v4/v6 first when needed."""
        setkey = mgr_attributes['mgr_attr_name']

        nic_info = self.get_manager_ethernet_uri()
        ethuri = nic_info["nic_addr"]

        response = self.get_request(self.root_uri + ethuri)
        if not response['ret']:
            return response
        data = response['data']

        # DHCP-provided NTP servers must be turned off before static
        # ones can be configured.
        if data["DHCPv4"]["UseNTPServers"]:
            res_dhv4 = self.patch_request(
                self.root_uri + ethuri, {"DHCPv4": {"UseNTPServers": False}})
            if not res_dhv4['ret']:
                return res_dhv4

        if data["DHCPv6"]["UseNTPServers"]:
            res_dhv6 = self.patch_request(
                self.root_uri + ethuri, {"DHCPv6": {"UseNTPServers": False}})
            if not res_dhv6['ret']:
                return res_dhv6

        datetime_uri = self.manager_uri + "DateTime"

        response = self.get_request(self.root_uri + datetime_uri)
        if not response['ret']:
            return response
        data = response['data']

        ntp_list = data[setkey]
        # At most two entries are kept; drop the oldest to make room.
        if len(ntp_list) == 2:
            ntp_list.pop(0)
        ntp_list.append(mgr_attributes['mgr_attr_value'])

        response1 = self.patch_request(self.root_uri + datetime_uri, {setkey: ntp_list})
        if not response1['ret']:
            return response1

        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']}

    def set_time_zone(self, attr):
        """Set the iLO time zone by (partial) name match against the
        manager's TimeZoneList."""
        key = attr['mgr_attr_name']

        uri = self.manager_uri + "DateTime/"
        response = self.get_request(self.root_uri + uri)
        if not response['ret']:
            return response
        data = response["data"]

        if key not in data:
            return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key}

        # Translate the requested zone name into the index iLO expects.
        index = ""
        for tz in data["TimeZoneList"]:
            if attr['mgr_attr_value'] in tz["Name"]:
                index = tz["Index"]
                break

        payload = {key: {"Index": index}}
        response = self.patch_request(self.root_uri + uri, payload)
        if not response['ret']:
            return response

        return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}

    def set_dns_server(self, attr):
        """Append a DNS server to the Oem/Hpe IPv4 list (three kept at most)."""
        key = attr['mgr_attr_name']
        nic_info = self.get_manager_ethernet_uri()
        uri = nic_info["nic_addr"]

        response = self.get_request(self.root_uri + uri)
        if not response['ret']:
            return response
        data = response['data']

        dns_list = data["Oem"]["Hpe"]["IPv4"][key]
        # At most three DNS servers are kept; drop the oldest entry.
        if len(dns_list) == 3:
            dns_list.pop(0)
        dns_list.append(attr['mgr_attr_value'])

        payload = {
            "Oem": {
                "Hpe": {
                    "IPv4": {
                        key: dns_list
                    }
                }
            }
        }

        response = self.patch_request(self.root_uri + uri, payload)
        if not response['ret']:
            return response

        return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}

    def set_domain_name(self, attr):
        """Set a static domain name, disabling the DHCP-provided names
        on v4/v6 first when needed."""
        key = attr['mgr_attr_name']

        nic_info = self.get_manager_ethernet_uri()
        ethuri = nic_info["nic_addr"]

        response = self.get_request(self.root_uri + ethuri)
        if not response['ret']:
            return response
        data = response['data']

        if data["DHCPv4"]["UseDomainName"]:
            res_dhv4 = self.patch_request(
                self.root_uri + ethuri, {"DHCPv4": {"UseDomainName": False}})
            if not res_dhv4['ret']:
                return res_dhv4

        if data["DHCPv6"]["UseDomainName"]:
            res_dhv6 = self.patch_request(
                self.root_uri + ethuri, {"DHCPv6": {"UseDomainName": False}})
            if not res_dhv6['ret']:
                return res_dhv6

        payload = {"Oem": {
            "Hpe": {
                key: attr['mgr_attr_value']
            }
        }}

        response = self.patch_request(self.root_uri + ethuri, payload)
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}

    def set_wins_registration(self, mgrattr):
        """Disable WINS registration for the manager NIC (the named
        Oem/Hpe IPv4 attribute is forced to False)."""
        # Renamed from 'Key' to 'key' (PEP 8 local naming).
        key = mgrattr['mgr_attr_name']

        nic_info = self.get_manager_ethernet_uri()
        ethuri = nic_info["nic_addr"]

        payload = {
            "Oem": {
                "Hpe": {
                    "IPv4": {
                        key: False
                    }
                }
            }
        }

        response = self.patch_request(self.root_uri + ethuri, payload)
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
|
@ -0,0 +1,93 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests.exceptions
|
||||
HAS_REQUESTS = True
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
HAS_REQUESTS = False
|
||||
|
||||
INFLUXDB_IMP_ERR = None
|
||||
try:
|
||||
from influxdb import InfluxDBClient
|
||||
from influxdb import __version__ as influxdb_version
|
||||
from influxdb import exceptions
|
||||
HAS_INFLUXDB = True
|
||||
except ImportError:
|
||||
INFLUXDB_IMP_ERR = traceback.format_exc()
|
||||
HAS_INFLUXDB = False
|
||||
|
||||
|
||||
class InfluxDb():
    """Common plumbing for InfluxDB modules: argument spec, library
    checks and client construction."""

    def __init__(self, module):
        self.module = module
        self.params = self.module.params
        # Verify both required libraries before reading any parameters.
        self.check_lib()
        self.hostname = self.params['hostname']
        self.port = self.params['port']
        self.path = self.params['path']
        self.username = self.params['username']
        self.password = self.params['password']
        self.database_name = self.params.get('database_name')

    def check_lib(self):
        """Fail the module unless both requests and influxdb imported cleanly."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
        if not HAS_INFLUXDB:
            self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)

    @staticmethod
    def influxdb_argument_spec():
        """Argument spec fragment shared by all InfluxDB modules."""
        return dict(
            hostname=dict(type='str', default='localhost'),
            port=dict(type='int', default=8086),
            path=dict(type='str', default=''),
            username=dict(type='str', default='root', aliases=['login_username']),
            password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
            ssl=dict(type='bool', default=False),
            validate_certs=dict(type='bool', default=True),
            timeout=dict(type='int'),
            retries=dict(type='int', default=3),
            proxies=dict(type='dict', default={}),
            use_udp=dict(type='bool', default=False),
            udp_port=dict(type='int', default=4444),
        )

    def connect_to_influxdb(self):
        """Build an InfluxDBClient, passing version-gated options only
        when the installed influxdb library supports them."""
        kwargs = dict(
            host=self.hostname,
            port=self.port,
            username=self.username,
            password=self.password,
            database=self.database_name,
            ssl=self.params['ssl'],
            verify_ssl=self.params['validate_certs'],
            timeout=self.params['timeout'],
            use_udp=self.params['use_udp'],
            udp_port=self.params['udp_port'],
            proxies=self.params['proxies'],
        )
        lib_version = LooseVersion(influxdb_version)
        if lib_version >= LooseVersion('4.1.0'):
            # retries option is added in version 4.1.0
            kwargs.update(retries=self.params['retries'])
        if lib_version >= LooseVersion('5.1.0'):
            # path argument is added in version 5.1.0
            kwargs.update(path=self.path)
        return InfluxDBClient(**kwargs)
|
@ -0,0 +1,214 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
import uuid
|
||||
|
||||
import re
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.six import PY3
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||
from ansible.module_utils.urls import fetch_url, HAS_GSSAPI
|
||||
from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound
|
||||
|
||||
|
||||
def _env_then_dns_fallback(*args, **kwargs):
    """Resolve a value from the environment first, then via DNS.

    When the environment variable is unset or empty, fall back to
    resolving the well-known ``ipa-ca`` DNS entry, which every IPA
    installation publishes for its CA.
    """
    try:
        value = env_fallback(*args, **kwargs)
        # An empty string counts as "not provided".
        if value == '':
            raise AnsibleFallbackNotFound
    except AnsibleFallbackNotFound:
        # If no host was given, we try to guess it from IPA.
        # The ipa-ca entry is a standard entry that IPA will have set for
        # the CA.
        try:
            return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
        except Exception:
            raise AnsibleFallbackNotFound
    else:
        return value
|
||||
|
||||
|
||||
class IPAClient(object):
    """Thin JSON-RPC client for the FreeIPA web API.

    Handles session login (forms-based or GSSAPI) and wraps the
    ``/session/json`` endpoint used by the ipa_* modules.
    """

    def __init__(self, module, host, port, protocol):
        # module: AnsibleModule, used for warnings/failures and HTTP fetches.
        # host/port/protocol: location of the IPA server API.
        self.host = host
        self.port = port
        self.protocol = protocol
        self.module = module
        self.headers = None  # populated by login(); None means "not logged in"
        self.timeout = module.params.get('ipa_timeout')
        self.use_gssapi = False

    def get_base_url(self):
        # NOTE(review): self.port is stored but not embedded in the URL, so the
        # scheme's default port is always used — confirm this is intended.
        return '%s://%s/ipa' % (self.protocol, self.host)

    def get_json_url(self):
        """Return the JSON-RPC endpoint URL."""
        return '%s/session/json' % self.get_base_url()

    def login(self, username, password):
        """Authenticate against IPA.

        Prefers GSSAPI when a Kerberos ccache or keytab is configured in the
        environment; otherwise performs a forms-based password login and
        stores the session cookie in ``self.headers``.
        """
        if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
            self.use_gssapi = True
        elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
            # Use a private in-memory ccache so the keytab login does not
            # clobber any existing on-disk credential cache.
            ccache = "MEMORY:" + str(uuid.uuid4())
            os.environ['KRB5CCNAME'] = ccache
            self.use_gssapi = True
        else:
            if not password:
                if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
                    # Kerberos env vars are set but GSSAPI support is missing.
                    self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
                self._fail('login', 'Password is required if not using '
                           'GSSAPI. To use GSSAPI, please set the '
                           'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
                           ' environment variables.')
            url = '%s/session/login_password' % self.get_base_url()
            data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
            headers = {'referer': self.get_base_url(),
                       'Content-Type': 'application/x-www-form-urlencoded',
                       'Accept': 'text/plain'}
            try:
                resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
                status_code = info['status']
                if status_code not in [200, 201, 204]:
                    self._fail('login', info['msg'])

                # The session cookie is all later requests need.
                self.headers = {'Cookie': info.get('set-cookie')}
            except Exception as e:
                self._fail('login', to_native(e))
        if not self.headers:
            self.headers = dict()
        self.headers.update({
            'referer': self.get_base_url(),
            'Content-Type': 'application/json',
            'Accept': 'application/json'})

    def _fail(self, msg, e):
        """Abort the module run; *e* may be a plain string or an IPA error dict.

        NOTE(review): ``'message' in e`` is a substring test when *e* is a
        string; a string containing "message" would then hit ``.get()`` and
        raise — confirm callers never pass such strings.
        """
        if 'message' in e:
            err_string = e.get('message')
        else:
            err_string = e
        self.module.fail_json(msg='%s: %s' % (msg, err_string))

    def get_ipa_version(self):
        """Return the IPA server version as ``X.Y.Z``, or None if unparseable."""
        response = self.ping()['summary']
        ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*')
        version_match = ipa_ver_regex.match(response)
        ipa_version = None
        if version_match:
            ipa_version = version_match.groups()[0]
        return ipa_version

    def ping(self):
        """Ping the API (also used to discover the server version)."""
        return self._post_json(method='ping', name=None)

    def _post_json(self, method, name, item=None):
        """POST one JSON-RPC request and return the unwrapped result.

        Returns the first element when the result is a list, ``{}`` for an
        empty list, and None when the response carries no result at all.
        Fails the module on HTTP errors or an ``error`` field in the reply.
        """
        if item is None:
            item = {}
        url = '%s/session/json' % self.get_base_url()
        data = dict(method=method)

        # TODO: We should probably handle this a little better.
        if method in ('ping', 'config_show', 'otpconfig_show'):
            data['params'] = [[], {}]
        elif method in ('config_mod', 'otpconfig_mod'):
            data['params'] = [[], item]
        else:
            data['params'] = [[name], item]

        try:
            resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
                                   headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
            status_code = info['status']
            if status_code not in [200, 201, 204]:
                self._fail(method, info['msg'])
        except Exception as e:
            self._fail('post %s' % method, to_native(e))

        if PY3:
            # Python 3: email.message API with latin-1 as the fallback charset.
            charset = resp.headers.get_content_charset('latin-1')
        else:
            # Python 2: mimetools-style header API.
            response_charset = resp.headers.getparam('charset')
            if response_charset:
                charset = response_charset
            else:
                charset = 'latin-1'
        resp = json.loads(to_text(resp.read(), encoding=charset))
        err = resp.get('error')
        if err is not None:
            self._fail('response %s' % method, err)

        if 'result' in resp:
            result = resp.get('result')
            # IPA sometimes nests the payload one level deeper.
            if 'result' in result:
                result = result.get('result')
            if isinstance(result, list):
                if len(result) > 0:
                    return result[0]
                else:
                    return {}
            return result
        return None

    def get_diff(self, ipa_data, module_data):
        """Return the keys whose values differ between IPA and the module.

        List values are compared order-insensitively; a scalar module value
        is wrapped in a list when IPA returns a list for that key.
        """
        result = []
        for key in module_data.keys():
            mod_value = module_data.get(key, None)
            if isinstance(mod_value, list):
                default = []
            else:
                default = None
            ipa_value = ipa_data.get(key, default)
            if isinstance(ipa_value, list) and not isinstance(mod_value, list):
                mod_value = [mod_value]
            if isinstance(ipa_value, list) and isinstance(mod_value, list):
                mod_value = sorted(mod_value)
                ipa_value = sorted(ipa_value)
            if mod_value != ipa_value:
                result.append(key)
        return result

    def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None, append=None):
        """Reconcile a list attribute: remove extras (unless *append*), add missing.

        Returns True when a change was (or, in check mode, would be) made.
        When *item* is given, the diff is passed as ``{item: diff}`` to the
        add/remove methods instead of the bare list.
        """
        changed = False
        # Entries present in IPA but not requested by the module are removed,
        # unless append mode keeps existing entries in place.
        diff = list(set(ipa_list) - set(module_list))
        if append is not True and len(diff) > 0:
            changed = True
            if not self.module.check_mode:
                if item:
                    remove_method(name=name, item={item: diff})
                else:
                    remove_method(name=name, item=diff)

        # Entries the module wants but IPA does not yet have are added.
        diff = list(set(module_list) - set(ipa_list))
        if len(diff) > 0:
            changed = True
            if not self.module.check_mode:
                if item:
                    add_method(name=name, item={item: diff})
                else:
                    add_method(name=name, item=diff)

        return changed
|
||||
|
||||
|
||||
def ipa_argument_spec():
    """Return the argument spec shared by every ipa_* module.

    Values fall back to IPA_* environment variables; the host additionally
    falls back to resolving the ``ipa-ca`` DNS entry.
    """
    return {
        'ipa_prot': dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
        'ipa_host': dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
        'ipa_port': dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
        'ipa_user': dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
        'ipa_pass': dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
        'ipa_timeout': dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
        'validate_certs': dict(type='bool', default=True),
    }
|
@ -0,0 +1,182 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import hmac
|
||||
import re
|
||||
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
|
||||
try:
|
||||
from hashlib import sha1
|
||||
except ImportError:
|
||||
import sha as sha1
|
||||
|
||||
HASHED_KEY_MAGIC = "|1|"
|
||||
|
||||
|
||||
def is_ssh_url(url):
    """Return True when *url* looks like an ssh-style repository URL."""
    # scp-like syntax (user@host:path) has an "@" but no scheme separator.
    if "@" in url and "://" not in url:
        return True
    # Otherwise only an explicit ssh scheme qualifies.
    return any(url.startswith(prefix) for prefix in ("ssh://", "git+ssh://", "ssh+git://"))
|
||||
|
||||
|
||||
def get_fqdn_and_port(repo_url):
    """Extract the hostname and port from a repository URL.

    Returns a ``(fqdn, port)`` tuple; either element may be None when it
    cannot be determined.  For scp-style URLs (user@host:path) the colon
    separates the path, so no port is ever reported for them.
    """
    fqdn = None
    port = None
    ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')

    if "@" in repo_url and "://" not in repo_url:
        # scp-style: user@host:path or user@host/path
        rest = repo_url.split("@", 1)[1]
        bracketed = ipv6_re.match(rest)
        if bracketed:
            # Bracketed IPv6 literal; whatever follows ":" here is the path.
            fqdn = bracketed.group(1)
        elif ":" in rest:
            fqdn = rest.split(":")[0]
        elif "/" in rest:
            fqdn = rest.split("/")[0]
    elif "://" in repo_url:
        # A scheme is present, so urlparse can do the heavy lifting.
        parts = urlparse(repo_url)
        # urlparse may yield an empty netloc for odd URLs; skip those.
        if parts[1] != '':
            netloc = parts[1]
            if "@" in netloc:
                netloc = netloc.split("@", 1)[1]
            bracketed = ipv6_re.match(netloc)
            if bracketed:
                fqdn, port = bracketed.groups()
            elif ":" in netloc:
                fqdn, port = netloc.split(":")[0:2]
            else:
                fqdn = netloc
    return fqdn, port
|
||||
|
||||
|
||||
def check_hostkey(module, fqdn):
    """Return True when *fqdn* already has an entry in a known_hosts file."""
    return not not_in_host_file(module, fqdn)
|
||||
|
||||
|
||||
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
|
||||
# the paramiko code to import and use this.
|
||||
|
||||
def not_in_host_file(self, host):
    """Return True when *host* has no entry in any known_hosts file.

    Checks the user's known_hosts plus the usual system-wide locations.
    Both plain and hashed (``|1|``) entries are understood.

    Note: the first parameter is named ``self`` for historical reasons
    (callers pass the module object); it is unused.
    """
    # Local import keeps this self-contained; base64 is stdlib.
    import base64

    if 'USER' in os.environ:
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_host_file = "~/.ssh/known_hosts"
    user_host_file = os.path.expanduser(user_host_file)

    host_file_list = [
        user_host_file,
        "/etc/ssh/ssh_known_hosts",
        "/etc/ssh/ssh_known_hosts2",
        "/etc/openssh/ssh_known_hosts",
    ]

    # Hashed entries HMAC the raw host bytes; normalize once up front.
    host_bytes = host.encode('utf-8') if not isinstance(host, bytes) else host

    hfiles_not_found = 0
    for hf in host_file_list:
        if not os.path.exists(hf):
            hfiles_not_found += 1
            continue

        try:
            host_fh = open(hf)
        except IOError:
            hfiles_not_found += 1
            continue
        else:
            data = host_fh.read()
            host_fh.close()

        for line in data.split("\n"):
            # Skip blank/malformed lines (a valid entry has at least two fields).
            if line is None or " " not in line:
                continue
            tokens = line.split()
            if tokens[0].find(HASHED_KEY_MAGIC) == 0:
                # Hashed known-host entry: |1|base64(salt)|base64(hmac-sha1(host)).
                try:
                    (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
                    # str.decode('base64') only existed on Python 2; use the
                    # base64 module so hashed entries also match on Python 3.
                    hashed = hmac.new(base64.b64decode(kn_salt), digestmod=sha1)
                    hashed.update(host_bytes)
                    if hashed.digest() == base64.b64decode(kn_host):
                        return False
                except Exception:
                    # invalid hashed host key, skip it
                    continue
            else:
                # standard host file entry
                if host in tokens[0]:
                    return False

    return True
|
||||
|
||||
|
||||
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):

    """Use ssh-keyscan to fetch *fqdn*'s host key and append it to the
    user's known_hosts file.

    Fails the module run when ssh-keyscan is unavailable, the ~/.ssh
    directory is missing (and create_dir is False), or no key is returned.
    Returns the (rc, stdout, stderr) triple of the ssh-keyscan run.
    """

    keyscan_cmd = module.get_bin_path('ssh-keyscan', True)

    # Resolve the invoking user's ~/.ssh even when running under sudo.
    if 'USER' in os.environ:
        user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_ssh_dir = "~/.ssh/"
        user_host_file = "~/.ssh/known_hosts"
    user_ssh_dir = os.path.expanduser(user_ssh_dir)

    if not os.path.exists(user_ssh_dir):
        if create_dir:
            try:
                # 0700: known_hosts directory must not be group/world readable.
                os.makedirs(user_ssh_dir, int('700', 8))
            except Exception:
                module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
        else:
            module.fail_json(msg="%s does not exist" % user_ssh_dir)
    elif not os.path.isdir(user_ssh_dir):
        module.fail_json(msg="%s is not a directory" % user_ssh_dir)

    if port:
        this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
    else:
        this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)

    rc, out, err = module.run_command(this_cmd)
    # ssh-keyscan gives a 0 exit code and prints nothing on timeout
    if rc != 0 or not out:
        msg = 'failed to retrieve hostkey'
        if not out:
            msg += '. "%s" returned no matches.' % this_cmd
        else:
            msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)

        if err:
            msg += ' [stderr]: %s' % err

        module.fail_json(msg=msg)

    module.append_to_file(user_host_file, out)

    return rc, out, err
|
@ -0,0 +1,92 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
|
||||
# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
|
||||
# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
import ldap
|
||||
import ldap.sasl
|
||||
|
||||
HAS_LDAP = True
|
||||
|
||||
SASCL_CLASS = {
|
||||
'gssapi': ldap.sasl.gssapi,
|
||||
'external': ldap.sasl.external,
|
||||
}
|
||||
except ImportError:
|
||||
HAS_LDAP = False
|
||||
|
||||
|
||||
def gen_specs(**specs):
    """Merge the shared LDAP connection arguments into *specs* and return it.

    Shared keys overwrite any identically named keys supplied by the caller.
    """
    common = {
        'bind_dn': dict(),
        'bind_pw': dict(default='', no_log=True),
        'dn': dict(required=True),
        'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
        'server_uri': dict(default='ldapi:///'),
        'start_tls': dict(default=False, type='bool'),
        'validate_certs': dict(default=True, type='bool'),
        'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
    }
    specs.update(common)
    return specs
|
||||
|
||||
|
||||
class LdapGeneric(object):
    """Common base for the LDAP modules.

    Reads the shared connection parameters from the module and opens a
    bound LDAP connection during construction.
    """

    def __init__(self, module):
        # Shortcuts
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.dn = self.module.params['dn']
        self.referrals_chasing = self.module.params['referrals_chasing']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
        self.verify_cert = self.module.params['validate_certs']
        self.sasl_class = self.module.params['sasl_class']

        # Establish connection
        self.connection = self._connect_to_ldap()

    def fail(self, msg, exn):
        """Fail the module run, attaching exception details and a traceback."""
        self.module.fail_json(
            msg=msg,
            details=to_native(exn),
            exception=traceback.format_exc()
        )

    def _connect_to_ldap(self):
        """Open, optionally TLS-protect, and bind an LDAP connection.

        Uses a simple bind when bind_dn is set, otherwise a SASL
        interactive bind with the configured mechanism.
        """
        if not self.verify_cert:
            # NOTE: ldap.set_option here changes the global (library-wide)
            # TLS setting, not just this connection.
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

        connection = ldap.initialize(self.server_uri)

        if self.referrals_chasing == 'disabled':
            # Switch off chasing of referrals (https://github.com/ansible-collections/community.general/issues/1067)
            connection.set_option(ldap.OPT_REFERRALS, 0)

        if self.start_tls:
            try:
                connection.start_tls_s()
            except ldap.LDAPError as e:
                self.fail("Cannot start TLS.", e)

        try:
            if self.bind_dn is not None:
                connection.simple_bind_s(self.bind_dn, self.bind_pw)
            else:
                # Unknown sasl_class values fall back to the external mechanism.
                klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)
                connection.sasl_interactive_bind_s('', klass())
        except ldap.LDAPError as e:
            self.fail("Cannot bind to the server.", e)

        return connection
|
@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Luke Murphy @decentral1se
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
def get_user_agent(module):
    """Retrieve a user-agent to send with LinodeClient requests."""
    # Default to 'unknown' and let a successful import override it.
    ansible_version = 'unknown'
    try:
        from ansible.module_utils.ansible_release import __version__ as ansible_version
    except ImportError:
        pass
    return 'Ansible-%s/%s' % (module, ansible_version)
|
@ -0,0 +1,132 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
|
||||
#
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import socket
|
||||
import ssl
|
||||
|
||||
from ansible.module_utils.urls import generic_urlparse
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from ansible.module_utils.six.moves import http_client
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
# httplib/http.client connection using unix domain socket
|
||||
HTTPConnection = http_client.HTTPConnection
|
||||
HTTPSConnection = http_client.HTTPSConnection
|
||||
|
||||
import json
|
||||
|
||||
|
||||
class UnixHTTPConnection(HTTPConnection):
    """HTTPConnection variant that talks over a unix domain socket."""

    def __init__(self, path):
        # The host value is irrelevant for unix sockets; 'localhost' simply
        # satisfies the parent constructor.
        super(UnixHTTPConnection, self).__init__('localhost')
        self.path = path

    def connect(self):
        """Open the unix socket instead of a TCP connection."""
        unix_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        unix_sock.connect(self.path)
        self.sock = unix_sock
|
||||
|
||||
|
||||
class LXDClientException(Exception):
    """Raised on any error while talking to the LXD server.

    Additional context (e.g. the underlying ``err`` or collected ``logs``)
    is stored in ``kwargs`` for the caller to inspect.
    """

    def __init__(self, msg, **kwargs):
        self.msg = msg
        self.kwargs = kwargs
|
||||
|
||||
|
||||
class LXDClient(object):
    def __init__(self, url, key_file=None, cert_file=None, debug=False):
        """LXD Client.

        :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
        :type url: ``str``
        :param key_file: The path of the client certificate key file.
        :type key_file: ``str``
        :param cert_file: The path of the client certificate file.
        :type cert_file: ``str``
        :param debug: The debug flag. The request and response are stored in logs when debug is true.
        :type debug: ``bool``
        """
        self.url = url
        self.debug = debug
        self.logs = []  # request/response history, attached to errors in debug mode
        if url.startswith('https:'):
            # Remote server: authenticate with a client certificate over TLS.
            self.cert_file = cert_file
            self.key_file = key_file
            parts = generic_urlparse(urlparse(self.url))
            ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ctx.load_cert_chain(cert_file, keyfile=key_file)
            self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
        elif url.startswith('unix:'):
            # Local server: plain HTTP over the unix socket, no TLS needed.
            unix_socket_path = url[len('unix:'):]
            self.connection = UnixHTTPConnection(unix_socket_path)
        else:
            raise LXDClientException('URL scheme must be unix: or https:')

    def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None):
        """Perform a request; for async operations, wait for completion.

        Raises LXDClientException when the async operation does not end in
        'Success'.  Returns the decoded JSON response.
        """
        resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
        if resp_json['type'] == 'async':
            # Async operation: poll its /wait endpoint until it finishes.
            url = '{0}/wait'.format(resp_json['operation'])
            resp_json = self._send_request('GET', url)
            if wait_for_container:
                while resp_json['metadata']['status'] == 'Running':
                    resp_json = self._send_request('GET', url)
            if resp_json['metadata']['status'] != 'Success':
                self._raise_err_from_json(resp_json)
        return resp_json

    def authenticate(self, trust_password):
        """Register this client's certificate in the server's trust store."""
        body_json = {'type': 'client', 'password': trust_password}
        return self._send_request('POST', '/1.0/certificates', body_json=body_json)

    def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
        """Send one request and return the decoded JSON response.

        Error responses raise LXDClientException unless their error_code is
        listed in *ok_error_codes* (or the error is the benign
        "Certificate already in trust store").
        """
        try:
            body = json.dumps(body_json)
            self.connection.request(method, url, body=body)
            resp = self.connection.getresponse()
            resp_data = resp.read()
            resp_data = to_text(resp_data, errors='surrogate_or_strict')
            resp_json = json.loads(resp_data)
            # Record every exchange; only surfaced to the user in debug mode.
            self.logs.append({
                'type': 'sent request',
                'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
                'response': {'json': resp_json}
            })
            resp_type = resp_json.get('type', None)
            if resp_type == 'error':
                if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
                    return resp_json
                if resp_json['error'] == "Certificate already in trust store":
                    # Re-authenticating is harmless; treat it as success.
                    return resp_json
                self._raise_err_from_json(resp_json)
            return resp_json
        except socket.error as e:
            raise LXDClientException('cannot connect to the LXD server', err=e)

    def _raise_err_from_json(self, resp_json):
        """Raise an LXDClientException built from an error response."""
        err_params = {}
        if self.debug:
            err_params['logs'] = self.logs
        raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)

    @staticmethod
    def _get_err_from_resp_json(resp_json):
        """Extract the error text from metadata.err, else the top-level error field."""
        err = None
        metadata = resp_json.get('metadata', None)
        if metadata is not None:
            err = metadata.get('err', None)
        if err is None:
            err = resp_json.get('error', None)
        return err
|
@ -0,0 +1,157 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
|
||||
#
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
|
||||
CLIENT_IMP_ERR = None
|
||||
try:
|
||||
from manageiq_client.api import ManageIQClient
|
||||
HAS_CLIENT = True
|
||||
except ImportError:
|
||||
CLIENT_IMP_ERR = traceback.format_exc()
|
||||
HAS_CLIENT = False
|
||||
|
||||
|
||||
def manageiq_argument_spec():
    """Return the argument spec describing the manageiq_connection suboptions.

    Connection defaults are taken from the MIQ_* environment variables
    when present.
    """
    connection_options = {
        'url': dict(default=os.environ.get('MIQ_URL', None)),
        'username': dict(default=os.environ.get('MIQ_USERNAME', None)),
        'password': dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
        'token': dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
        'validate_certs': dict(default=True, type='bool', aliases=['verify_ssl']),
        'ca_cert': dict(required=False, default=None, aliases=['ca_bundle_path']),
    }

    return {
        'manageiq_connection': dict(
            type='dict',
            apply_defaults=True,
            options=connection_options,
        ),
    }
|
||||
|
||||
|
||||
def check_client(module):
    """Fail the module run when the manageiq-client library is unavailable."""
    if not HAS_CLIENT:
        module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
|
||||
|
||||
|
||||
def validate_connection_params(module):
    """Validate the manageiq_connection credentials and return them.

    A usable credential set is either url+username+password or url+token;
    otherwise the module run is failed naming the first missing argument.
    """
    params = module.params['manageiq_connection']
    url = params['url']
    token = params['token']
    username = params['username']
    password = params['password']

    # Either user/password or token based authentication is acceptable.
    if url and ((username and password) or token):
        return params

    error_str = "missing required argument: manageiq_connection[{}]"
    for arg in ['url', 'username', 'password']:
        if params[arg] in (None, ''):
            module.fail_json(msg=error_str.format(arg))
|
||||
|
||||
|
||||
def manageiq_entities():
    """Map human-readable entity names to their ManageIQ API collection names."""
    return {
        'provider': 'providers',
        'host': 'hosts',
        'vm': 'vms',
        'category': 'categories',
        'cluster': 'clusters',
        'data store': 'data_stores',
        'group': 'groups',
        'resource pool': 'resource_pools',
        'service': 'services',
        'service template': 'service_templates',
        'template': 'templates',
        'tenant': 'tenants',
        'user': 'users',
        'blueprint': 'blueprints',
    }
|
||||
|
||||
|
||||
class ManageIQ(object):
    """
    class encapsulating ManageIQ API client.
    """

    def __init__(self, module):
        # handle import errors
        check_client(module)

        # Validated manageiq_connection suboptions.
        params = validate_connection_params(module)

        url = params['url']
        username = params['username']
        password = params['password']
        token = params['token']
        verify_ssl = params['validate_certs']
        ca_bundle_path = params['ca_cert']

        self._module = module
        self._api_url = url + '/api'
        self._auth = dict(user=username, password=password, token=token)
        try:
            self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
        except Exception as e:
            self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))

    @property
    def module(self):
        """ Ansible module module

        Returns:
            the ansible module
        """
        return self._module

    @property
    def api_url(self):
        """ Base ManageIQ API

        Returns:
            the base ManageIQ API
        """
        return self._api_url

    @property
    def client(self):
        """ ManageIQ client

        Returns:
            the ManageIQ client
        """
        return self._client

    def find_collection_resource_by(self, collection_name, **params):
        """ Searches the collection resource by the collection name and the param passed.

        Returns:
            the resource as an object if it exists in manageiq, None otherwise.
        """
        try:
            entity = self.client.collections.__getattribute__(collection_name).get(**params)
        except ValueError:
            # The client raises ValueError when no matching entity exists.
            return None
        except Exception as e:
            self.module.fail_json(msg="failed to find resource {error}".format(error=e))
        return vars(entity)

    def find_collection_resource_or_fail(self, collection_name, **params):
        """ Searches the collection resource by the collection name and the param passed.

        Returns:
            the resource as an object if it exists in manageiq, Fail otherwise.
        """
        resource = self.find_collection_resource_by(collection_name, **params)
        if resource:
            return resource
        else:
            msg = "{collection_name} where {params} does not exist in manageiq".format(
                collection_name=collection_name, params=str(params))
            self.module.fail_json(msg=msg)
|
@ -0,0 +1,138 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from ansible.module_utils.urls import open_url, urllib_error
|
||||
from ansible.module_utils.basic import json
|
||||
|
||||
|
||||
class Response(object):
    '''
    Create a response object to mimic that of requests.

    Carries the raw body text and the HTTP status code, plus a json()
    helper to decode the body.
    '''

    def __init__(self):
        # Both are filled in by the caller after the HTTP exchange.
        self.content = None
        self.status_code = None

    def json(self):
        '''Decode the stored body as JSON.'''
        return json.loads(self.content)
|
||||
|
||||
|
||||
def memset_api_call(api_key, api_method, payload=None):
    '''
    Generic function which returns results back to calling function.

    Requires an API key and an API method to assemble the API URL.
    Returns response text to be analysed.

    :param api_key: Memset API key; also used as the basic-auth username.
    :param api_method: API method name appended to the base URL.
    :param payload: optional dict of form parameters; copied, never mutated.
    :return: tuple (has_failed, msg, response)
    '''
    # instantiate a response object
    response = Response()

    # if we've already started preloading the payload then copy it
    # and use that, otherwise we need to instantiate it.
    if payload is None:
        payload = dict()
    else:
        payload = payload.copy()

    # set some sane defaults
    has_failed = False
    msg = None

    data = urlencode(payload)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    api_uri_base = 'https://api.memset.com/v1/json/'
    api_uri = '{0}{1}/'.format(api_uri_base, api_method)

    try:
        # The API key doubles as the basic-auth username; no password needed.
        resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
        response.content = resp.read().decode('utf-8')
        response.status_code = resp.getcode()
    except urllib_error.HTTPError as e:
        try:
            errorcode = e.code
        except AttributeError:
            errorcode = None

        has_failed = True
        response.content = e.read().decode('utf8')
        response.status_code = errorcode

        # NOTE(review): assumes the error body is JSON containing
        # 'error_type' and 'error' keys; a non-JSON error body would
        # raise here -- TODO confirm against the Memset API.
        if response.status_code is not None:
            msg = "Memset API returned a {0} response ({1}, {2}).".format(response.status_code, response.json()['error_type'], response.json()['error'])
        else:
            msg = "Memset API returned an error ({0}, {1}).".format(response.json()['error_type'], response.json()['error'])

    # On success, hand back the decoded body as the message.
    if msg is None:
        msg = response.json()

    return(has_failed, msg, response)
|
||||
|
||||
|
||||
def check_zone_domain(data, domain):
    '''
    Return True if the domain already exists, False otherwise.
    '''
    if data.status_code not in (200, 201):
        # A failed API call can never confirm the domain exists.
        return False
    return any(entry['domain'] == domain for entry in data.json())
|
||||
|
||||
|
||||
def check_zone(data, name):
    '''
    Return a tuple (exists, counter); exists is True only when exactly one
    zone carries the given nickname.
    '''
    matches = 0
    if data.status_code in (200, 201):
        # Count every zone whose nickname matches.
        matches = sum(1 for zone in data.json() if zone['nickname'] == name)
    return (matches == 1, matches)
|
||||
|
||||
|
||||
def get_zone_id(zone_name, current_zones):
    '''
    Return the zone's id if it exists and is unique.

    :return: tuple (zone_exists, msg, counter, zone_id)
    '''
    matching_ids = [zone['id'] for zone in current_zones if zone['nickname'] == zone_name]
    count = len(matching_ids)

    if count == 1:
        # Exactly one match: safe to hand back its id.
        return (True, None, count, matching_ids[0])
    if count == 0:
        return (False, 'No matching zone found', count, None)
    # Several zones share the nickname, so the id is ambiguous.
    return (False, 'Zone ID could not be returned as duplicate zone names were detected', count, None)
|
@ -0,0 +1,69 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception
|
||||
|
||||
|
||||
class ModuleHelperBase(object):
    """Base class for the ModuleHelper family.

    Wraps an AnsibleModule (or a dict of AnsibleModule kwargs used to build
    one) and drives the module lifecycle:
    __init_module__ -> __run__ -> __quit_module__ -> exit_json.
    """
    # AnsibleModule instance (or argument-spec kwargs dict); may be provided
    # as a class attribute by subclasses instead of at construction time.
    module = None
    # Exposed so subclasses can raise self.ModuleHelperException directly.
    ModuleHelperException = _MHE

    def __init__(self, module=None):
        self._changed = False
        if module:
            self.module = module
        if not isinstance(self.module, AnsibleModule):
            # Allow passing plain AnsibleModule kwargs instead of an instance.
            self.module = AnsibleModule(**self.module)

    def __init_module__(self):
        """Hook: setup before __run__. Optional override."""
        pass

    def __run__(self):
        """Hook: the module's main logic. Subclasses must override."""
        raise NotImplementedError()

    def __quit_module__(self):
        """Hook: teardown after __run__. Optional override."""
        pass

    def __changed__(self):
        """Hook: custom changed-state calculation. Optional override."""
        raise NotImplementedError()

    @property
    def changed(self):
        # Prefer the subclass's __changed__ hook; fall back to the flag.
        try:
            return self.__changed__()
        except NotImplementedError:
            return self._changed

    @changed.setter
    def changed(self, value):
        self._changed = value

    def has_changed(self):
        """Hook: final changed value reported to Ansible. Must override."""
        raise NotImplementedError()

    @property
    def output(self):
        """Hook: dict of results returned to Ansible. Must override."""
        raise NotImplementedError()

    @module_fails_on_exception
    def run(self):
        """Run the full lifecycle and exit via AnsibleModule.exit_json."""
        self.__init_module__()
        self.__run__()
        self.__quit_module__()
        output = self.output
        if 'failed' not in output:
            output['failed'] = False
        self.module.exit_json(changed=self.has_changed(), **output)

    @classmethod
    def execute(cls, module=None):
        """Convenience entry point: instantiate and run in one call."""
        cls(module).run()
|
@ -0,0 +1,87 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
from functools import wraps
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException
|
||||
|
||||
|
||||
def cause_changes(on_success=None, on_failure=None):
    """Decorator factory: set ``self.changed`` after the wrapped method runs.

    On normal return, ``self.changed`` is set to *on_success* (if given);
    when the method raises, it is set to *on_failure* (if given) and the
    exception is re-raised.
    """
    if on_success is None and on_failure is None:
        # No effects requested: hand back a no-op decorator.
        return lambda func: func

    def deco(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                instance = args[0]
                func(*args, **kwargs)
                if on_success is not None:
                    instance.changed = on_success
            except Exception:
                if on_failure is not None:
                    instance.changed = on_failure
                raise
        return wrapper

    return deco
|
||||
|
||||
|
||||
def module_fails_on_exception(func):
    """Decorator: convert exceptions raised by *func* into fail_json calls.

    SystemExit passes through untouched (AnsibleModule's exit paths raise
    it).  A ModuleHelperException may carry extra output vars in its
    ``update_output`` dict, which are merged before failing.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            func(self, *args, **kwargs)
        except SystemExit:
            # exit_json/fail_json already handled the exit; do not mask it.
            raise
        except ModuleHelperException as e:
            if e.update_output:
                self.update_output(e.update_output)
            self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **self.output)
        except Exception as e:
            # Any other exception becomes a generic module failure.
            msg = "Module failed with exception: {0}".format(str(e).strip())
            self.module.fail_json(msg=msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **self.output)
    return wrapper
|
||||
|
||||
|
||||
def check_mode_skip(func):
    """Decorator: run *func* only when the module is not in check mode.

    In check mode the call is skipped and None is returned.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.module.check_mode:
            return None
        return func(self, *args, **kwargs)
    return wrapper
|
||||
|
||||
|
||||
def check_mode_skip_returns(callable=None, value=None):
    """Decorator factory: short-circuit the wrapped method in check mode.

    - *callable* given: in check mode return ``callable(self, *args, **kwargs)``
    - *value* given: in check mode return *value*
    - neither: behave like plain check_mode_skip (None in check mode)
    """
    # Neither short-circuit behaviour requested: fall back to the plain skip.
    if callable is None and value is None:
        return check_mode_skip

    def deco(func):
        if callable is not None:
            def check_mode_result(self, args, kwargs):
                return callable(self, *args, **kwargs)
        else:
            def check_mode_result(self, args, kwargs):
                return value

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.module.check_mode:
                return check_mode_result(self, args, kwargs)
            return func(self, *args, **kwargs)
        return wrapper

    return deco
|
@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleHelperException(Exception):
    """Exception carrying a module failure message plus extra output vars."""

    @staticmethod
    def _get_remove(key, kwargs):
        # Remove and return kwargs[key], or None when the key is absent.
        return kwargs.pop(key, None)

    def __init__(self, *args, **kwargs):
        # 'msg' and 'update_output' are consumed here; positional args are
        # forwarded to Exception unchanged.
        self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self)
        self.update_output = self._get_remove('update_output', kwargs) or {}
        super(ModuleHelperException, self).__init__(*args)
|
@ -0,0 +1,189 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from functools import partial
|
||||
|
||||
|
||||
class ArgFormat(object):
    """
    Argument formatter for use as a command line parameter. Used in CmdMixin.
    """
    # Formatting styles mapping a value to CLI tokens:
    BOOLEAN = 0       # emit fmt only when the value is truthy
    PRINTF = 1        # fmt % value (printf-style)
    FORMAT = 2        # fmt.format(value) (str.format-style)
    BOOLEAN_NOT = 3   # emit fmt only when the value is falsy

    @staticmethod
    def stars_deco(num):
        # Wrap a formatter so the value is unpacked as *value (num=1) or
        # **value (num=2); any other num leaves the formatter untouched.
        if num == 1:
            def deco(f):
                return lambda v: f(*v)
            return deco
        elif num == 2:
            def deco(f):
                return lambda v: f(**v)
            return deco

        return lambda f: f

    def __init__(self, name, fmt=None, style=FORMAT, stars=0):
        """
        Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
        the CLI command execution.
        :param name: Name of the argument to be formatted
        :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
        :param style: Whether arg_format (as str) should use printf-style formatting.
                      Ignored if arg_format is None or not a str (should be callable).
        :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value
        """
        def printf_fmt(_fmt, v):
            try:
                return [_fmt % v]
            except TypeError as e:
                # A fmt without a % placeholder is emitted verbatim; any
                # other TypeError is a real formatting error.
                if e.args[0] != 'not all arguments converted during string formatting':
                    raise
                return [_fmt]

        _fmts = {
            ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
            ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]),
            ArgFormat.PRINTF: printf_fmt,
            ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
        }

        self.name = name
        self.stars = stars
        self.style = style

        if fmt is None:
            # Default to plain "{0}" formatting.  NOTE(review): only the
            # local 'style' is overridden here -- self.style keeps the
            # caller's value (used later by to_text) -- confirm intended.
            fmt = "{0}"
            style = ArgFormat.FORMAT

        if isinstance(fmt, str):
            func = _fmts[style]
            self.arg_format = partial(func, fmt)
        elif isinstance(fmt, list) or isinstance(fmt, tuple):
            # A sequence of formats produces one CLI token per element.
            self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
        elif hasattr(fmt, '__call__'):
            self.arg_format = fmt
        else:
            raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
                            'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))

        if stars:
            self.arg_format = (self.stars_deco(stars))(self.arg_format)

    def to_text(self, value):
        # None normally means "omit this argument"; BOOLEAN_NOT still emits
        # its flag for a falsy value.
        if value is None and self.style != ArgFormat.BOOLEAN_NOT:
            return []
        func = self.arg_format
        return [str(p) for p in func(value)]
|
||||
|
||||
|
||||
class CmdMixin(object):
    """
    Mixin for mapping module options to running a CLI command with its arguments.
    """
    # Command to run: a str (binary name) or a list (binary + leading args).
    command = None
    # Maps parameter name -> ArgFormat kwargs (fmt/style/stars).
    command_args_formats = {}
    # Options always passed to AnsibleModule.run_command.
    run_command_fixed_options = {}
    # Default for run_command's check_rc option.
    check_rc = False
    # Locale forced via LANGUAGE/LC_ALL so command output is parseable.
    force_lang = "C"

    @property
    def module_formats(self):
        """Default plain formatters, one per module parameter."""
        result = {}
        for param in self.module.params.keys():
            result[param] = ArgFormat(param)
        return result

    @property
    def custom_formats(self):
        """Formatters built from command_args_formats; override the defaults."""
        result = {}
        for param, fmt_spec in self.command_args_formats.items():
            result[param] = ArgFormat(param, **fmt_spec)
        return result

    def _calculate_args(self, extra_params=None, params=None):
        """Build the full argv list from the command plus selected parameters.

        Entries of *params* may be a parameter name (looked up in self.vars
        or *extra_params*) or a single-entry dict {name: value}.
        """
        def add_arg_formatted_param(_cmd_args, arg_format, _value):
            args = list(arg_format.to_text(_value))
            return _cmd_args + args

        def find_format(_param):
            # Custom formats take precedence over the default ones.
            return self.custom_formats.get(_param, self.module_formats.get(_param))

        extra_params = extra_params or dict()
        cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
        try:
            # Resolve the executable to an absolute path when possible.
            cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
        except ValueError:
            pass
        param_list = params if params else self.vars.keys()

        for param in param_list:
            if isinstance(param, dict):
                if len(param) != 1:
                    raise self.ModuleHelperException("run_command parameter as a dict must "
                                                     "contain only one key: {0}".format(param))
                _param = list(param.keys())[0]
                fmt = find_format(_param)
                value = param[_param]
            elif isinstance(param, str):
                if param in self.vars.keys():
                    fmt = find_format(param)
                    value = self.vars[param]
                elif param in extra_params:
                    fmt = find_format(param)
                    value = extra_params[param]
                else:
                    raise self.ModuleHelperException('Cannot determine value for parameter: {0}'.format(param))
            else:
                raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
            cmd_args = add_arg_formatted_param(cmd_args, fmt, value)

        return cmd_args

    def process_command_output(self, rc, out, err):
        """Hook: post-process (rc, out, err); default is pass-through."""
        return rc, out, err

    def run_command(self,
                    extra_params=None,
                    params=None,
                    process_output=None,
                    publish_rc=True,
                    publish_out=True,
                    publish_err=True,
                    publish_cmd=True,
                    *args, **kwargs):
        """Run the external command, publish rc/stdout/stderr/argv into the
        module output as requested, and return the processed (rc, out, err)."""
        cmd_args = self._calculate_args(extra_params, params)
        options = dict(self.run_command_fixed_options)
        # Explicit fixed option wins over the class default; kwargs win over both.
        options['check_rc'] = options.get('check_rc', self.check_rc)
        options.update(kwargs)
        env_update = dict(options.get('environ_update', {}))
        if self.force_lang:
            # Force a predictable locale so command output can be parsed.
            env_update.update({
                'LANGUAGE': self.force_lang,
                'LC_ALL': self.force_lang,
            })
            self.update_output(force_lang=self.force_lang)
        options['environ_update'] = env_update
        rc, out, err = self.module.run_command(cmd_args, *args, **options)
        if publish_rc:
            self.update_output(rc=rc)
        if publish_out:
            self.update_output(stdout=out)
        if publish_err:
            self.update_output(stderr=err)
        if publish_cmd:
            self.update_output(cmd_args=cmd_args)
        if process_output is None:
            _process = self.process_command_output
        else:
            _process = process_output

        return _process(rc, out, err)
|
@ -0,0 +1,61 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class DeprecateAttrsMixin(object):
    """Mixin replacing an attribute with a property that emits a deprecation
    warning (once) on first read or write."""

    def _deprecate_setup(self, attr, target, module):
        """Validate arguments and prepare the per-target state dicts.

        :return: tuple (target, module, value_dict, trigger_dict)
        :raises ValueError: when *attr* is missing on *target* or no
            AnsibleModule instance can be discovered.
        """
        if target is None:
            target = self
        if not hasattr(target, attr):
            raise ValueError("Target {0} has no attribute {1}".format(target, attr))
        if module is None:
            # Try to discover the AnsibleModule from the target itself.
            if isinstance(target, AnsibleModule):
                module = target
            elif hasattr(target, "module") and isinstance(target.module, AnsibleModule):
                module = target.module
            else:
                raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.")

        # setup internal state dicts
        value_attr = "__deprecated_attr_value"
        trigger_attr = "__deprecated_attr_trigger"
        if not hasattr(target, value_attr):
            setattr(target, value_attr, {})
        if not hasattr(target, trigger_attr):
            setattr(target, trigger_attr, {})
        value_dict = getattr(target, value_attr)
        trigger_dict = getattr(target, trigger_attr)
        return target, module, value_dict, trigger_dict

    def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None):
        """Replace ``target.attr`` with a deprecation-warning property.

        NOTE(review): properties are descriptors and only take effect when
        set on a class -- *target* is expected to be a class object (as in
        ModuleHelper.__init__); confirm before using with instances.
        """
        target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module)

        # Preserve the current value (or the supplied fallback).
        value_dict[attr] = getattr(target, attr, value)
        trigger_dict[attr] = False

        def _trigger():
            # Warn only once per attribute.
            if not trigger_dict[attr]:
                module.deprecate(msg, version=version, date=date, collection_name=collection_name)
                trigger_dict[attr] = True

        def _getter(_self):
            _trigger()
            return value_dict[attr]

        def _setter(_self, new_value):
            _trigger()
            value_dict[attr] = new_value

        # override attribute
        prop = property(_getter)
        setattr(target, attr, prop)
        setattr(target, "_{0}_setter".format(attr), prop.setter(_setter))
|
@ -0,0 +1,58 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception
|
||||
|
||||
|
||||
class DependencyCtxMgr(object):
    """Context manager recording whether a guarded block (typically an
    import) succeeded; any exception is swallowed and stored."""

    def __init__(self, name, msg=None):
        self.name = name
        self.msg = msg
        self.has_it = False
        # Stored exception details, populated on failure.
        self.exc_type = None
        self.exc_val = None
        self.exc_tb = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.has_it = exc_type is None
        self.exc_type = exc_type
        self.exc_val = exc_val
        self.exc_tb = exc_tb
        # Returning True suppresses the exception when one occurred.
        return not self.has_it

    @property
    def text(self):
        # Prefer the user-supplied message over the stored exception text.
        return self.msg or str(self.exc_val)
|
||||
|
||||
|
||||
class DependencyMixin(ModuleHelperBase):
    """Mixin tracking optional dependencies declared via DependencyCtxMgr,
    failing the module when any declared dependency is missing."""

    # Shared across all subclasses: every dependency declared so far.
    _dependencies = []

    @classmethod
    def dependency(cls, name, msg):
        """Register and return a DependencyCtxMgr for use in a with-block."""
        cls._dependencies.append(DependencyCtxMgr(name, msg))
        return cls._dependencies[-1]

    def fail_on_missing_deps(self):
        """fail_json with the stored traceback of the first missing dependency."""
        for d in self._dependencies:
            if not d.has_it:
                self.module.fail_json(changed=False,
                                      exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
                                      msg=d.text,
                                      **self.output)

    @module_fails_on_exception
    def run(self):
        # Verify dependencies before handing over to the normal lifecycle.
        self.fail_on_missing_deps()
        super(DependencyMixin, self).run()
|
@ -0,0 +1,39 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class StateMixin(object):
    """Mixin dispatching module execution to a method named after the value
    of the 'state' parameter (e.g. state=present -> state_present())."""

    # Name of the module parameter used for dispatch.
    state_param = 'state'
    # Used when the state parameter is None.
    default_state = None

    def _state(self):
        # Effective state: the parameter value, or the declared default.
        state = self.module.params.get(self.state_param)
        return self.default_state if state is None else state

    def _method(self, state):
        # Handler name, e.g. "state_present".
        return "{0}_{1}".format(self.state_param, state)

    def __run__(self):
        state = self._state()
        self.vars.state = state

        # resolve aliases
        if state not in self.module.params:
            aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
            if aliased:
                state = aliased[0]
                self.vars.effective_state = state

        method = self._method(state)
        if not hasattr(self, method):
            # Allow subclasses to provide a catch-all handler.
            return self.__state_fallback__()
        func = getattr(self, method)
        return func()

    def __state_fallback__(self):
        """Hook: called when no state_<state> method exists; default fails."""
        raise ValueError("Cannot find method: {0}".format(self._method(self._state())))
|
@ -0,0 +1,134 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import copy
|
||||
|
||||
|
||||
class VarMeta(object):
    """Metadata for one VarDict variable: diff/output/change/fact flags plus
    the initial and current values used for change tracking."""

    # Sentinel distinguishing "not passed" from an explicit None.
    NOTHING = object()

    def __init__(self, diff=False, output=True, change=None, fact=False):
        self.init = False
        self.initial_value = None
        self.value = None

        self.diff = diff
        # By default a var participates in change-tracking iff it is diffed.
        self.change = diff if change is None else change
        self.output = output
        self.fact = fact

    def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
        """Update any subset of the flags; the NOTHING sentinel keeps
        initial_value untouched."""
        if diff is not None:
            self.diff = diff
        if output is not None:
            self.output = output
        if change is not None:
            self.change = change
        if fact is not None:
            self.fact = fact
        if initial_value is not self.NOTHING:
            self.initial_value = copy.deepcopy(initial_value)

    def set_value(self, value):
        """Record a new value; the first value ever seen becomes the baseline."""
        if not self.init:
            self.initial_value = copy.deepcopy(value)
            self.init = True
        self.value = value
        return self

    @property
    def has_changed(self):
        return self.change and (self.initial_value != self.value)

    @property
    def diff_result(self):
        if self.diff and self.has_changed:
            return {'before': self.initial_value, 'after': self.value}
        return None

    def __str__(self):
        return "<VarMeta: value={0}, initial={1}, diff={2}, output={3}, change={4}>".format(
            self.value, self.initial_value, self.diff, self.output, self.change
        )
|
||||
|
||||
|
||||
class VarDict(object):
    """Dict-like container of module variables with per-variable VarMeta.

    Supports both item access (``vd['x']``) and attribute access (``vd.x``).
    """

    def __init__(self):
        self._data = dict()
        self._meta = dict()

    def __getitem__(self, item):
        return self._data[item]

    def __setitem__(self, key, value):
        self.set(key, value)

    def __getattr__(self, item):
        # Unknown attributes resolve to stored vars, then to dict methods
        # (keys, items, ...) of the underlying data dict.
        try:
            return self._data[item]
        except KeyError:
            return getattr(self._data, item)

    def __setattr__(self, key, value):
        # The two internal dicts are real attributes; everything else is
        # treated as a variable assignment.
        if key in ('_data', '_meta'):
            super(VarDict, self).__setattr__(key, value)
        else:
            self.set(key, value)

    def meta(self, name):
        return self._meta[name]

    def set_meta(self, name, **kwargs):
        self.meta(name).set(**kwargs)

    def set(self, name, value, **kwargs):
        """Set a variable's value, creating its VarMeta on first assignment."""
        if name in ('_data', '_meta'):
            raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
        self._data[name] = value
        meta = self._meta.get(name)
        if meta is None:
            # First assignment: metadata flags come from kwargs.
            meta = VarMeta(**kwargs)
        meta.set_value(value)
        self._meta[name] = meta

    def output(self):
        return dict((k, v) for k, v in self._data.items() if self.meta(k).output)

    def diff(self):
        """Aggregate per-var diffs into one {'before': ..., 'after': ...}."""
        before = {}
        after = {}
        for name in self._data:
            dr = self.meta(name).diff_result
            if dr is not None:
                before[name] = dr['before']
                after[name] = dr['after']
        if before or after:
            return {'before': before, 'after': after}
        return None

    def facts(self):
        facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact)
        return facts_result if facts_result else None

    def change_vars(self):
        return [v for v in self._data if self.meta(v).change]

    def has_changed(self, v):
        return self._meta[v].has_changed
|
||||
|
||||
|
||||
class VarsMixin(object):
    """Mixin adding a VarDict (``self.vars``) to the ModuleHelper stack."""

    def __init__(self, module=None):
        # Create the variable store before the rest of the MRO initialises.
        self.vars = VarDict()
        super(VarsMixin, self).__init__(module)

    def update_vars(self, meta=None, **kwargs):
        """Set several vars at once, applying the same meta flags to each."""
        if meta is None:
            meta = {}
        for name, value in kwargs.items():
            self.vars.set(name, value, **meta)
|
@ -0,0 +1,89 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
|
||||
|
||||
|
||||
class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
    """Opinionated base class for Ansible modules: variable tracking, diff,
    facts, dependency checking and attribute-deprecation support."""

    # Output keys clashing with reserved result fields are renamed "_<name>".
    _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
    # When set, published facts appear under ansible_facts[facts_name].
    facts_name = None
    # Parameter names to publish in output / diff / change-tracking / facts.
    output_params = ()
    diff_params = ()
    change_params = ()
    facts_params = ()

    VarDict = _VD  # for backward compatibility, will be deprecated at some point

    def __init__(self, module=None):
        super(ModuleHelper, self).__init__(module)
        # Seed self.vars from the module parameters with per-param metadata.
        for name, value in self.module.params.items():
            self.vars.set(
                name, value,
                diff=name in self.diff_params,
                output=name in self.output_params,
                change=None if not self.change_params else name in self.change_params,
                fact=name in self.facts_params,
            )

        self._deprecate_attr(
            attr="VarDict",
            msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from "
                "the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead",
            version="6.0.0",
            collection_name="community.general",
            target=ModuleHelper,
            module=self.module)

    def update_output(self, **kwargs):
        """Set vars that are published in the module result."""
        self.update_vars(meta={"output": True}, **kwargs)

    def update_facts(self, **kwargs):
        """Set vars that are published as Ansible facts."""
        self.update_vars(meta={"fact": True}, **kwargs)

    def _vars_changed(self):
        return any(self.vars.has_changed(v) for v in self.vars.change_vars())

    def has_changed(self):
        # Changed if explicitly flagged or if any tracked var changed.
        return self.changed or self._vars_changed()

    @property
    def output(self):
        """Assemble the final result dict: vars, facts and diff, with
        reserved-key collisions renamed to "_<name>"."""
        result = dict(self.vars.output())
        if self.facts_name:
            facts = self.vars.facts()
            if facts is not None:
                result['ansible_facts'] = {self.facts_name: facts}
        if self.module._diff:
            diff = result.get('diff', {})
            vars_diff = self.vars.diff() or {}
            result['diff'] = dict_merge(dict(diff), vars_diff)

        # BUG FIX: iterate over a snapshot -- the loop adds "_<name>" and
        # deletes "<name>", and mutating a dict while iterating it directly
        # raises RuntimeError in Python 3.
        for varname in list(result):
            if varname in self._output_conflict_list:
                result["_" + varname] = result[varname]
                del result[varname]
        return result
|
||||
|
||||
|
||||
class StateModuleHelper(StateMixin, ModuleHelper):
    """ModuleHelper that dispatches execution on the 'state' parameter."""
    pass
|
||||
|
||||
|
||||
class CmdModuleHelper(CmdMixin, ModuleHelper):
    """ModuleHelper that wraps the execution of an external CLI command."""
    pass
|
||||
|
||||
|
||||
class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
    """ModuleHelper combining CLI command execution with state dispatch."""
    pass
|
@ -0,0 +1,18 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
|
||||
ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict
|
@ -0,0 +1,370 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
"""
|
||||
Pritunl API that offers CRUD operations on Pritunl Organizations and Users
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.urls import open_url
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class PritunlException(Exception):
    """Raised for errors encountered while talking to the Pritunl API."""
    pass
|
||||
|
||||
|
||||
def pritunl_argument_spec():
    """Return the AnsibleModule argument spec shared by all Pritunl modules."""
    return {
        "pritunl_url": dict(required=True, type="str"),
        # The token itself is not secret enough to redact from logs.
        "pritunl_api_token": dict(required=True, type="str", no_log=False),
        "pritunl_api_secret": dict(required=True, type="str", no_log=True),
        "validate_certs": dict(required=False, type="bool", default=True),
    }
|
||||
|
||||
|
||||
def get_pritunl_settings(module):
    """
    Map module arguments to the keyword arguments expected by the
    pritunl request helpers.
    """
    params = module.params
    return {
        "api_token": params.get("pritunl_api_token"),
        "api_secret": params.get("pritunl_api_secret"),
        "base_url": params.get("pritunl_url"),
        "validate_certs": params.get("validate_certs"),
    }
|
||||
|
||||
|
||||
def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True):
    """GET the collection of organizations from the Pritunl server."""
    return pritunl_auth_request(
        method="GET",
        path="/organization",
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def _delete_pritunl_organization(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    """DELETE one organization, identified by its ID."""
    return pritunl_auth_request(
        method="DELETE",
        path="/organization/%s" % (organization_id),
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def _post_pritunl_organization(
    api_token, api_secret, base_url, organization_data, validate_certs=True
):
    """POST a new organization to the Pritunl server.

    Bug fix: the path previously was the literal string "/organization/%s"
    with no value substituted for the placeholder, so the request went to a
    bogus URL. Organizations are created by POSTing to the collection
    endpoint "/organization"; the server assigns the new ID.

    :arg dict organization_data: payload, e.g. {"name": ...}.
    """
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="POST",
        # Collection endpoint - no ID in the path for creation.
        path="/organization",
        headers={"Content-Type": "application/json"},
        data=json.dumps(organization_data),
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def _get_pritunl_users(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    """GET every user belonging to the given organization."""
    return pritunl_auth_request(
        method="GET",
        path="/user/%s" % organization_id,
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def _delete_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
    """DELETE one user, identified by organization ID and user ID."""
    return pritunl_auth_request(
        method="DELETE",
        path="/user/%s/%s" % (organization_id, user_id),
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def _post_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_data, validate_certs=True
):
    """POST a new user into the given organization."""
    return pritunl_auth_request(
        method="POST",
        path="/user/%s" % organization_id,
        headers={"Content-Type": "application/json"},
        data=json.dumps(user_data),
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def _put_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, user_data, validate_certs=True
):
    """PUT updated user data onto an existing user within an organization."""
    return pritunl_auth_request(
        method="PUT",
        path="/user/%s/%s" % (organization_id, user_id),
        headers={"Content-Type": "application/json"},
        data=json.dumps(user_data),
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )
|
||||
|
||||
|
||||
def list_pritunl_organizations(
    api_token, api_secret, base_url, validate_certs=True, filters=None
):
    """
    Return all Pritunl organizations, optionally keeping only those whose
    fields match every key/value pair in *filters*.

    :raises PritunlException: when the API call does not return HTTP 200.
    """
    response = _get_pritunl_organizations(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException("Could not retrieve organizations from Pritunl")

    # No filters means every organization is returned.
    return [
        org
        for org in json.loads(response.read())
        if filters is None
        or all(org[key] == value for key, value in iteritems(filters))
    ]
|
||||
|
||||
|
||||
def list_pritunl_users(
    api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None
):
    """
    Return all users of one organization, optionally keeping only those
    whose fields match every key/value pair in *filters*.

    :raises PritunlException: when the API call does not return HTTP 200.
    """
    response = _get_pritunl_users(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException("Could not retrieve users from Pritunl")

    # No filters means every user is returned.
    return [
        user
        for user in json.loads(response.read())
        if filters is None
        or all(user[key] == value for key, value in iteritems(filters))
    ]
|
||||
|
||||
|
||||
def post_pritunl_organization(
    api_token,
    api_secret,
    base_url,
    organization_name,
    validate_certs=True,
):
    """
    Create a new organization on the Pritunl server.

    Bug fix: ``validate_certs`` was hardcoded to ``True`` in the inner
    request, so callers could never disable certificate validation; it is
    now forwarded.

    :return: the created organization object.
    :raises PritunlException: when the API call does not return HTTP 200.
    """
    response = _post_pritunl_organization(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_data={"name": organization_name},
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not add organization %s to Pritunl" % (organization_name)
        )
    # The organization POST request returns the created organization object
    return json.loads(response.read())
|
||||
|
||||
|
||||
def post_pritunl_user(
    api_token,
    api_secret,
    base_url,
    organization_id,
    user_data,
    user_id=None,
    validate_certs=True,
):
    """
    Create (POST) or update (PUT) a user inside an organization.

    When ``user_id`` is None a new user is created; otherwise the existing
    user is updated.

    Bug fixes:
    - ``validate_certs`` was hardcoded to ``True`` in both inner requests;
      it is now forwarded so certificate validation can be disabled.
    - the creation failure message wrongly said "Could not remove user"
      and interpolated ``user_id`` (which is None on that branch).

    :return: the created or updated user object.
    :raises PritunlException: when the API call does not return HTTP 200.
    """
    # If user_id is provided will do PUT otherwise will do POST
    if user_id is None:
        response = _post_pritunl_user(
            api_token=api_token,
            api_secret=api_secret,
            base_url=base_url,
            organization_id=organization_id,
            user_data=user_data,
            validate_certs=validate_certs,
        )

        if response.getcode() != 200:
            raise PritunlException(
                "Could not add user %s to organization %s in Pritunl"
                % (user_data.get("name"), organization_id)
            )
        # user POST request returns an array of a single item,
        # so return this item instead of the list
        return json.loads(response.read())[0]
    else:
        response = _put_pritunl_user(
            api_token=api_token,
            api_secret=api_secret,
            base_url=base_url,
            organization_id=organization_id,
            user_data=user_data,
            user_id=user_id,
            validate_certs=validate_certs,
        )

        if response.getcode() != 200:
            raise PritunlException(
                "Could not update user %s from organization %s from Pritunl"
                % (user_id, organization_id)
            )
        # The user PUT request returns the updated user object
        return json.loads(response.read())
|
||||
|
||||
|
||||
def delete_pritunl_organization(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    """
    Delete an organization from the Pritunl server.

    Bug fix: ``validate_certs`` was hardcoded to ``True`` in the inner
    request; it is now forwarded.

    :return: the server's JSON response body.
    :raises PritunlException: when the API call does not return HTTP 200.
    """
    response = _delete_pritunl_organization(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not remove organization %s from Pritunl" % (organization_id)
        )

    return json.loads(response.read())
|
||||
|
||||
|
||||
def delete_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
    """
    Delete a user from an organization on the Pritunl server.

    Bug fix: ``validate_certs`` was hardcoded to ``True`` in the inner
    request; it is now forwarded.

    :return: the server's JSON response body.
    :raises PritunlException: when the API call does not return HTTP 200.
    """
    response = _delete_pritunl_user(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        user_id=user_id,
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not remove user %s from organization %s from Pritunl"
            % (user_id, organization_id)
        )

    return json.loads(response.read())
|
||||
|
||||
|
||||
def pritunl_auth_request(
    api_token,
    api_secret,
    base_url,
    method,
    path,
    validate_certs=True,
    headers=None,
    data=None,
):
    """
    Send an authenticated API call to a Pritunl server.

    Taken from https://pritunl.com/api and adapted to work with Ansible open_url.

    :arg str api_token: Pritunl API token, sent in the Auth-Token header.
    :arg str api_secret: Pritunl API secret used to sign the request.
    :arg str base_url: Base URL of the Pritunl server.
    :arg str method: HTTP method (GET/POST/PUT/DELETE), case-insensitive.
    :arg str path: API path, appended verbatim to base_url.
    :arg bool validate_certs: Whether to verify TLS certificates.
    :arg dict headers: Extra headers, merged over the auth headers.
    :arg data: Request body; when present it is included in the signature.
    :return: the response object returned by open_url.
    :raises PritunlException: wrapping any exception raised by the request.
    """
    auth_timestamp = str(int(time.time()))
    auth_nonce = uuid.uuid4().hex

    # The signed string joins token, timestamp, nonce, method, path and
    # (when present) the body with '&', per the Pritunl API auth scheme.
    auth_string = "&".join(
        [api_token, auth_timestamp, auth_nonce, method.upper(), path]
        + ([data] if data else [])
    )

    # HMAC-SHA256 of the auth string, base64 encoded.
    auth_signature = base64.b64encode(
        hmac.new(
            api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256
        ).digest()
    )

    auth_headers = {
        "Auth-Token": api_token,
        "Auth-Timestamp": auth_timestamp,
        "Auth-Nonce": auth_nonce,
        "Auth-Signature": auth_signature,
    }

    # Caller-supplied headers win over the generated auth headers.
    if headers:
        auth_headers.update(headers)

    try:
        uri = "%s%s" % (base_url, path)

        return open_url(
            uri,
            method=method.upper(),
            headers=auth_headers,
            data=data,
            validate_certs=validate_certs,
        )
    except Exception as e:
        raise PritunlException(e)
|
@ -0,0 +1,264 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import time
|
||||
|
||||
|
||||
class OneAndOneResources:
    """Names of the 1&1 resource types handled by the helpers in this file."""
    firewall_policy = 'firewall_policy'
    load_balancer = 'load_balancer'
    monitoring_policy = 'monitoring_policy'
    private_network = 'private_network'
    public_ip = 'public_ip'
    role = 'role'
    server = 'server'
    user = 'user'
    vpn = 'vpn'
|
||||
|
||||
|
||||
def get_resource(oneandone_conn, resource_type, resource_id):
    """
    Fetch a single 1&1 resource by its type name and ID.

    Bug fix: an unknown ``resource_type`` previously crashed with
    ``TypeError: 'NoneType' object is not callable`` (the dict lookup
    returned None and was called anyway); it now raises a clear Exception,
    consistent with the other error paths in this file.

    :arg oneandone_conn: authenticated 1&1 client.
    :arg str resource_type: one of the OneAndOneResources values.
    :arg str resource_id: ID of the resource to fetch.
    :return: the resource as returned by the client getter.
    """
    switcher = {
        'firewall_policy': oneandone_conn.get_firewall,
        'load_balancer': oneandone_conn.get_load_balancer,
        'monitoring_policy': oneandone_conn.get_monitoring_policy,
        'private_network': oneandone_conn.get_private_network,
        'public_ip': oneandone_conn.get_public_ip,
        'role': oneandone_conn.get_role,
        'server': oneandone_conn.get_server,
        'user': oneandone_conn.get_user,
        'vpn': oneandone_conn.get_vpn,
    }

    getter = switcher.get(resource_type)
    if getter is None:
        raise Exception('Unsupported resource type: %s' % resource_type)
    return getter(resource_id)
|
||||
|
||||
|
||||
def get_datacenter(oneandone_conn, datacenter, full_object=False):
    """
    Look up a datacenter by ID or country code.

    Returns the full datacenter dict when full_object is True, otherwise
    its ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_datacenters():
        if datacenter not in (candidate['id'], candidate['country_code']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
    """
    Look up a fixed instance size (server flavor) by ID or name.

    Returns the full flavor dict when full_object is True, otherwise its
    ID; None when nothing matches.
    """
    for flavor in oneandone_conn.fixed_server_flavors():
        if fixed_instance_size not in (flavor['id'], flavor['name']):
            continue
        return flavor if full_object else flavor['id']
|
||||
|
||||
|
||||
def get_appliance(oneandone_conn, appliance, full_object=False):
    """
    Look up a server appliance (image) by ID or name.

    Returns the full appliance dict when full_object is True, otherwise
    its ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_appliances(q='IMAGE'):
        if appliance not in (candidate['id'], candidate['name']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_private_network(oneandone_conn, private_network, full_object=False):
    """
    Look up a private network by name or ID.

    Returns the full network dict when full_object is True, otherwise its
    ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_private_networks():
        if private_network not in (candidate['name'], candidate['id']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
    """
    Look up a monitoring policy by name or ID.

    Returns the full policy dict when full_object is True, otherwise its
    ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_monitoring_policies():
        if monitoring_policy not in (candidate['name'], candidate['id']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
    """
    Look up a firewall policy by name or ID.

    Returns the full policy dict when full_object is True, otherwise its
    ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_firewall_policies():
        if firewall_policy not in (candidate['name'], candidate['id']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
    """
    Look up a load balancer by name or ID.

    Returns the full load balancer dict when full_object is True,
    otherwise its ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_load_balancers():
        if load_balancer not in (candidate['name'], candidate['id']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_server(oneandone_conn, instance, full_object=False):
    """
    Look up a server by ID or name.

    Returns the full server dict when full_object is True, otherwise its
    ID; None when nothing matches.
    """
    for candidate in oneandone_conn.list_servers(per_page=1000):
        if instance not in (candidate['id'], candidate['name']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_user(oneandone_conn, user, full_object=False):
    """
    Look up a user by ID or name.

    Returns the full user dict when full_object is True, otherwise its ID;
    None when nothing matches.
    """
    for candidate in oneandone_conn.list_users(per_page=1000):
        if user not in (candidate['id'], candidate['name']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_role(oneandone_conn, role, full_object=False):
    """
    Look up a role by ID or name.

    Returns the full role dict when full_object is True, otherwise its ID;
    None when nothing matches.
    """
    for candidate in oneandone_conn.list_roles(per_page=1000):
        if role not in (candidate['id'], candidate['name']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_vpn(oneandone_conn, vpn, full_object=False):
    """
    Look up a VPN by ID or name.

    Returns the full VPN dict when full_object is True, otherwise its ID;
    None when nothing matches.
    """
    for candidate in oneandone_conn.list_vpns(per_page=1000):
        if vpn not in (candidate['id'], candidate['name']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def get_public_ip(oneandone_conn, public_ip, full_object=False):
    """
    Look up a public IP by ID or by the IP address itself.

    Returns the full record when full_object is True, otherwise its ID;
    None when nothing matches.
    """
    for candidate in oneandone_conn.list_public_ips(per_page=1000):
        if public_ip not in (candidate['id'], candidate['ip']):
            continue
        return candidate if full_object else candidate['id']
|
||||
|
||||
|
||||
def wait_for_resource_creation_completion(oneandone_conn,
                                          resource_type,
                                          resource_id,
                                          wait_timeout,
                                          wait_interval):
    """
    Waits for the resource create operation to complete based on the timeout period.

    Polls the resource every *wait_interval* seconds (note: the first poll
    happens only after one full interval) until it reaches its "ready"
    state - 'powered_on' for servers, 'active' for every other type.

    :arg oneandone_conn: authenticated 1&1 client.
    :arg str resource_type: one of the OneAndOneResources values.
    :arg str resource_id: ID of the resource being created.
    :arg wait_timeout: overall timeout in seconds.
    :arg wait_interval: delay between polls in seconds.
    :raises Exception: on creation failure, on an unknown state, or on timeout.
    """
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time():
        time.sleep(wait_interval)

        # Refresh the resource info
        resource = get_resource(oneandone_conn, resource_type, resource_id)

        # Servers report their state nested under 'status'.
        if resource_type == OneAndOneResources.server:
            resource_state = resource['status']['state']
        else:
            resource_state = resource['state']

        if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or
                (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')):
            return
        elif resource_state.lower() == 'failed':
            raise Exception('%s creation failed for %s' % (resource_type, resource_id))
        # Transitional states: keep polling. ('active' here only matters
        # for servers - for other types it already returned above.)
        elif resource_state.lower() in ('active',
                                        'enabled',
                                        'deploying',
                                        'configuring'):
            continue
        else:
            raise Exception(
                'Unknown %s state %s' % (resource_type, resource_state))

    raise Exception(
        'Timed out waiting for %s completion for %s' % (resource_type, resource_id))
|
||||
|
||||
|
||||
def wait_for_resource_deletion_completion(oneandone_conn,
                                          resource_type,
                                          resource_id,
                                          wait_timeout,
                                          wait_interval):
    """
    Waits for the resource delete operation to complete based on the timeout period.

    Unlike creation, deletion is confirmed through the account's DELETE
    logs (the resource itself disappears), so only resource types with a
    known log 'type' are supported: servers and private networks.

    :arg oneandone_conn: authenticated 1&1 client.
    :arg str resource_type: OneAndOneResources.server or
        OneAndOneResources.private_network.
    :arg str resource_id: ID of the resource being deleted.
    :arg wait_timeout: overall timeout in seconds.
    :arg wait_interval: delay between polls in seconds.
    :raises Exception: for unsupported resource types or on timeout.
    """
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time():
        time.sleep(wait_interval)

        # Refresh the operation info
        logs = oneandone_conn.list_logs(q='DELETE',
                                        period='LAST_HOUR',
                                        sort='-start_date')

        # Map the resource type onto the log entry 'type' field.
        if resource_type == OneAndOneResources.server:
            _type = 'VM'
        elif resource_type == OneAndOneResources.private_network:
            _type = 'PRIVATENETWORK'
        else:
            raise Exception(
                'Unsupported wait_for delete operation for %s resource' % resource_type)

        # A matching, successful DELETE log entry confirms deletion.
        for log in logs:
            if (log['resource']['id'] == resource_id and
                    log['action'] == 'DELETE' and
                    log['type'] == _type and
                    log['status']['state'] == 'OK'):
                return
    raise Exception(
        'Timed out waiting for %s deletion for %s' % (resource_type, resource_id))
|
@ -0,0 +1,486 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import abc
|
||||
import collections
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
|
||||
HPE_ONEVIEW_IMP_ERR = None
|
||||
try:
|
||||
from hpOneView.oneview_client import OneViewClient
|
||||
HAS_HPE_ONEVIEW = True
|
||||
except ImportError:
|
||||
HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
|
||||
HAS_HPE_ONEVIEW = False
|
||||
|
||||
from ansible.module_utils import six
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.common._collections_compat import Mapping
|
||||
|
||||
|
||||
def transform_list_to_dict(list_):
    """
    Flatten a list into a single dict.

    Mapping entries are merged in as-is; any other entry becomes a key
    with the value True.

    :arg list list_: List of values (may be None or empty)
    :return: dict: dictionary built
    """
    result = {}
    for entry in (list_ or []):
        if isinstance(entry, Mapping):
            result.update(entry)
        else:
            result[to_native(entry, errors='surrogate_or_strict')] = True
    return result
|
||||
|
||||
|
||||
def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
    """
    Merge two lists of dicts identified by *key*.

    Items present only in updated_list are added; items present only in
    original_list are dropped; items present in both get the original
    values overwritten by the updated ones. Updated keys listed in
    ignore_when_null are skipped when their value is None.

    :arg list original_list: original list.
    :arg list updated_list: list with changes.
    :arg str key: unique identifier.
    :arg list ignore_when_null: keys whose None values should not overwrite.
    :return: list: Lists merged.
    """
    null_ignored_keys = [] if ignore_when_null is None else ignore_when_null

    if not original_list:
        return updated_list

    # Copies of the originals, indexed by key, in original order.
    originals_by_key = collections.OrderedDict(
        (entry[key], entry.copy()) for entry in original_list
    )

    merged = collections.OrderedDict()
    for updated in updated_list:
        entry_key = updated[key]
        if entry_key not in originals_by_key:
            merged[entry_key] = updated
            continue
        for ignored in null_ignored_keys:
            if ignored in updated and updated[ignored] is None:
                updated.pop(ignored)
        base = originals_by_key[entry_key]
        base.update(updated)
        merged[entry_key] = base

    return list(merged.values())
|
||||
|
||||
|
||||
def _str_sorted(obj):
|
||||
if isinstance(obj, Mapping):
|
||||
return json.dumps(obj, sort_keys=True)
|
||||
else:
|
||||
return str(obj)
|
||||
|
||||
|
||||
def _standardize_value(value):
|
||||
"""
|
||||
Convert value to string to enhance the comparison.
|
||||
|
||||
:arg value: Any object type.
|
||||
|
||||
:return: str: Converted value.
|
||||
"""
|
||||
if isinstance(value, float) and value.is_integer():
|
||||
# Workaround to avoid erroneous comparison between int and float
|
||||
# Removes zero from integer floats
|
||||
value = int(value)
|
||||
|
||||
return str(value)
|
||||
|
||||
|
||||
class OneViewModuleException(Exception):
    """
    Base exception for OneView module errors.

    Attributes:
        msg (str): Exception message.
        oneview_response (dict): OneView rest response, when the error
            originated from an API call.
    """

    def __init__(self, data):
        self.msg = None
        self.oneview_response = None

        # A plain string is just a message; anything else is kept as the
        # raw OneView response payload (dicts also yield a message).
        if isinstance(data, six.string_types):
            self.msg = data
        else:
            self.oneview_response = data
            if isinstance(data, dict):
                self.msg = data.get('message')

        if self.oneview_response:
            Exception.__init__(self, self.msg, self.oneview_response)
        else:
            Exception.__init__(self, self.msg)
|
||||
|
||||
|
||||
class OneViewModuleTaskError(OneViewModuleException):
    """
    Raised when a OneView task finishes with an error.

    Attributes:
        msg (str): Exception message.
        error_code (str): A code which uniquely identifies the specific error.
    """

    def __init__(self, msg, error_code=None):
        self.error_code = error_code
        super(OneViewModuleTaskError, self).__init__(msg)
|
||||
|
||||
|
||||
class OneViewModuleValueError(OneViewModuleException):
    """
    OneView Value Error.
    The exception is raised when the data contains an inappropriate value,
    e.g. a required field missing from the module's input data.

    Attributes:
        msg (str): Exception message.
    """
    pass
|
||||
|
||||
|
||||
class OneViewModuleResourceNotFound(OneViewModuleException):
    """
    OneView Resource Not Found Exception.
    The exception is raised when an associated resource was not found
    on the OneView appliance.

    Attributes:
        msg (str): Exception message.
    """
    pass
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class OneViewModuleBase(object):
|
||||
    # Standard result messages shared by every OneView module.
    MSG_CREATED = 'Resource created successfully.'
    MSG_UPDATED = 'Resource updated successfully.'
    MSG_DELETED = 'Resource deleted successfully.'
    MSG_ALREADY_PRESENT = 'Resource is already present.'
    MSG_ALREADY_ABSENT = 'Resource is already absent.'
    MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '

    # Connection arguments accepted by every OneView module.
    ONEVIEW_COMMON_ARGS = dict(
        config=dict(type='path'),
        hostname=dict(type='str'),
        username=dict(type='str'),
        password=dict(type='str', no_log=True),
        api_version=dict(type='int'),
        image_streamer_hostname=dict(type='str')
    )

    # Extra argument added when a module opts into eTag validation.
    ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))

    # Subclasses point this at the SDK client for their resource type.
    resource_client = None
|
||||
|
||||
    def __init__(self, additional_arg_spec=None, validate_etag_support=False, supports_check_mode=False):
        """
        OneViewModuleBase constructor.

        Builds the AnsibleModule, verifies the hpOneView SDK is available,
        creates the OneView client and preloads common module parameters.

        :arg dict additional_arg_spec: Additional argument spec definition.
        :arg bool validate_etag_support: Enables support to eTag validation.
        :arg bool supports_check_mode: Whether the module supports Ansible check mode.
        """
        argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode)

        # Fail fast when the SDK is missing, then build the client.
        self._check_hpe_oneview_sdk()
        self._create_oneview_client()

        self.state = self.module.params.get('state')
        self.data = self.module.params.get('data')

        # Preload params for get_all - used by facts
        self.facts_params = self.module.params.get('params') or {}

        # Preload options as dict - used by facts
        self.options = transform_list_to_dict(self.module.params.get('options'))

        self.validate_etag_support = validate_etag_support
|
||||
|
||||
def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
|
||||
|
||||
merged_arg_spec = dict()
|
||||
merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
|
||||
|
||||
if validate_etag_support:
|
||||
merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
|
||||
|
||||
if additional_arg_spec:
|
||||
merged_arg_spec.update(additional_arg_spec)
|
||||
|
||||
return merged_arg_spec
|
||||
|
||||
def _check_hpe_oneview_sdk(self):
|
||||
if not HAS_HPE_ONEVIEW:
|
||||
self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
|
||||
|
||||
def _create_oneview_client(self):
|
||||
if self.module.params.get('hostname'):
|
||||
config = dict(ip=self.module.params['hostname'],
|
||||
credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
|
||||
api_version=self.module.params['api_version'],
|
||||
image_streamer_ip=self.module.params['image_streamer_hostname'])
|
||||
self.oneview_client = OneViewClient(config)
|
||||
elif not self.module.params['config']:
|
||||
self.oneview_client = OneViewClient.from_environment_variables()
|
||||
else:
|
||||
self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
|
||||
|
||||
    @abc.abstractmethod
    def execute_module(self):
        """
        Abstract method, must be implemented by the inheritor.

        This method is called from the run method. It should contain the module logic.

        :return: dict: It must return a dictionary with the attributes for the module result,
            such as ansible_facts, msg and changed.
        """
        pass
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Common implementation of the OneView run modules.
|
||||
|
||||
It calls the inheritor 'execute_module' function and sends the return to the Ansible.
|
||||
|
||||
It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
|
||||
|
||||
"""
|
||||
try:
|
||||
if self.validate_etag_support:
|
||||
if not self.module.params.get('validate_etag'):
|
||||
self.oneview_client.connection.disable_etag_validation()
|
||||
|
||||
result = self.execute_module()
|
||||
|
||||
if "changed" not in result:
|
||||
result['changed'] = False
|
||||
|
||||
self.module.exit_json(**result)
|
||||
|
||||
except OneViewModuleException as exception:
|
||||
error_msg = '; '.join(to_native(e) for e in exception.args)
|
||||
self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
|
||||
|
||||
def resource_absent(self, resource, method='delete'):
|
||||
"""
|
||||
Generic implementation of the absent state for the OneView resources.
|
||||
|
||||
It checks if the resource needs to be removed.
|
||||
|
||||
:arg dict resource: Resource to delete.
|
||||
:arg str method: Function of the OneView client that will be called for resource deletion.
|
||||
Usually delete or remove.
|
||||
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
|
||||
"""
|
||||
if resource:
|
||||
getattr(self.resource_client, method)(resource)
|
||||
|
||||
return {"changed": True, "msg": self.MSG_DELETED}
|
||||
else:
|
||||
return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
|
||||
|
||||
def get_by_name(self, name):
|
||||
"""
|
||||
Generic get by name implementation.
|
||||
|
||||
:arg str name: Resource name to search for.
|
||||
|
||||
:return: The resource found or None.
|
||||
"""
|
||||
result = self.resource_client.get_by('name', name)
|
||||
return result[0] if result else None
|
||||
|
||||
def resource_present(self, resource, fact_name, create_method='create'):
|
||||
"""
|
||||
Generic implementation of the present state for the OneView resources.
|
||||
|
||||
It checks if the resource needs to be created or updated.
|
||||
|
||||
:arg dict resource: Resource to create or update.
|
||||
:arg str fact_name: Name of the fact returned to the Ansible.
|
||||
:arg str create_method: Function of the OneView client that will be called for resource creation.
|
||||
Usually create or add.
|
||||
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
|
||||
"""
|
||||
|
||||
changed = False
|
||||
if "newName" in self.data:
|
||||
self.data["name"] = self.data.pop("newName")
|
||||
|
||||
if not resource:
|
||||
resource = getattr(self.resource_client, create_method)(self.data)
|
||||
msg = self.MSG_CREATED
|
||||
changed = True
|
||||
|
||||
else:
|
||||
merged_data = resource.copy()
|
||||
merged_data.update(self.data)
|
||||
|
||||
if self.compare(resource, merged_data):
|
||||
msg = self.MSG_ALREADY_PRESENT
|
||||
else:
|
||||
resource = self.resource_client.update(merged_data)
|
||||
changed = True
|
||||
msg = self.MSG_UPDATED
|
||||
|
||||
return dict(
|
||||
msg=msg,
|
||||
changed=changed,
|
||||
ansible_facts={fact_name: resource}
|
||||
)
|
||||
|
||||
def resource_scopes_set(self, state, fact_name, scope_uris):
|
||||
"""
|
||||
Generic implementation of the scopes update PATCH for the OneView resources.
|
||||
It checks if the resource needs to be updated with the current scopes.
|
||||
This method is meant to be run after ensuring the present state.
|
||||
:arg dict state: Dict containing the data from the last state results in the resource.
|
||||
It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
|
||||
:arg str fact_name: Name of the fact returned to the Ansible.
|
||||
:arg list scope_uris: List with all the scope URIs to be added to the resource.
|
||||
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
|
||||
"""
|
||||
if scope_uris is None:
|
||||
scope_uris = []
|
||||
resource = state['ansible_facts'][fact_name]
|
||||
operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
|
||||
|
||||
if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
|
||||
state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
|
||||
state['changed'] = True
|
||||
state['msg'] = self.MSG_UPDATED
|
||||
|
||||
return state
|
||||
|
||||
def compare(self, first_resource, second_resource):
    """
    Recursively compares dictionary contents equivalence, ignoring types and elements order.
    Particularities of the comparison:
        - Inexistent key = None
        - These values are considered equal: None, empty, False
        - Lists are compared value by value after a sort, if they have same size.
        - Each element is converted to str before the comparison.
    :arg dict first_resource: first dictionary
    :arg dict second_resource: second dictionary
    :return: bool: True when equal, False when different.
    """
    resource1 = first_resource
    resource2 = second_resource

    # Rendered once up front so every mismatch log below carries both dicts.
    debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)

    # The first resource is True / Not Null and the second resource is False / Null
    if resource1 and not resource2:
        self.module.log("resource1 and not resource2. " + debug_resources)
        return False

    # Checks all keys in first dict against the second dict
    for key in resource1:
        if key not in resource2:
            if resource1[key] is not None:
                # Inexistent key is equivalent to exist with value None
                self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
                return False
        # If both values are null, empty or False it will be considered equal.
        elif not resource1[key] and not resource2[key]:
            continue
        elif isinstance(resource1[key], Mapping):
            # recursive call
            if not self.compare(resource1[key], resource2[key]):
                self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
                return False
        elif isinstance(resource1[key], list):
            # change comparison function to compare_list
            if not self.compare_list(resource1[key], resource2[key]):
                self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
                return False
        # Scalar fallback: _standardize_value (defined elsewhere in this
        # file) presumably normalizes types before comparing — confirm.
        elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
            self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
            return False

    # Checks all keys in the second dict, looking for missing elements
    for key in resource2.keys():
        if key not in resource1:
            if resource2[key] is not None:
                # Inexistent key is equivalent to exist with value None
                self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
                return False

    return True
|
||||
def compare_list(self, first_resource, second_resource):
    """
    Recursively compares lists contents equivalence, ignoring types and element orders.
    Lists with same size are compared value by value after a sort,
    each element is converted to str before the comparison.
    :arg list first_resource: first list
    :arg list second_resource: second list
    :return: True when equal; False when different.
    """

    resource1 = first_resource
    resource2 = second_resource

    # Rendered once up front so every mismatch log below carries both lists.
    debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)

    # The second list is null / empty / False
    if not resource2:
        self.module.log("resource 2 is null. " + debug_resources)
        return False

    if len(resource1) != len(resource2):
        self.module.log("resources have different length. " + debug_resources)
        return False

    # Sort both lists so the comparison is order-insensitive. _str_sorted is
    # defined elsewhere in this file — presumably it stringifies elements to
    # give mixed types a total order; confirm against its definition.
    resource1 = sorted(resource1, key=_str_sorted)
    resource2 = sorted(resource2, key=_str_sorted)

    for i, val in enumerate(resource1):
        if isinstance(val, Mapping):
            # change comparison function to compare dictionaries
            if not self.compare(val, resource2[i]):
                self.module.log("resources are different. " + debug_resources)
                return False
        elif isinstance(val, list):
            # recursive call
            if not self.compare_list(val, resource2[i]):
                self.module.log("lists are different. " + debug_resources)
                return False
        elif _standardize_value(val) != _standardize_value(resource2[i]):
            self.module.log("values are different. " + debug_resources)
            return False

    # no differences found
    return True
@ -0,0 +1,122 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import sys
|
||||
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
def online_argument_spec():
    """Return the argument spec shared by all Online.net API modules."""
    token_env_vars = ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']
    return {
        'api_token': {
            'required': True,
            'fallback': (env_fallback, token_env_vars),
            'no_log': True,
            'aliases': ['oauth_token'],
        },
        'api_url': {
            'fallback': (env_fallback, ['ONLINE_API_URL']),
            'default': 'https://api.online.net',
            'aliases': ['base_url'],
        },
        'api_timeout': {'type': 'int', 'default': 30, 'aliases': ['timeout']},
        'validate_certs': {'default': True, 'type': 'bool'},
    }
||||
|
||||
class OnlineException(Exception):
    """Exception raised for errors returned by the Online.net API.

    Keeps the ``message`` attribute for backward compatibility with
    existing callers, and also passes the message to ``Exception.__init__``
    so that ``str(exc)`` and tracebacks show the failure reason instead of
    an empty string.
    """

    def __init__(self, message):
        # Previously the base constructor was never called, which left
        # exc.args empty and made str(exc) print nothing.
        super(OnlineException, self).__init__(message)
        self.message = message
|
||||
class Response(object):
    """Thin wrapper around a ``fetch_url`` result pair.

    Reads the raw body exactly once at construction and keeps the ``info``
    dict, exposing convenience accessors for the decoded JSON payload and
    the HTTP status.
    """

    def __init__(self, resp, info):
        # fetch_url returns resp=None on HTTP errors; in that case the
        # body (if any) lives inside the info dict instead.
        self.body = resp.read() if resp else None
        self.info = info

    @property
    def json(self):
        """Decoded JSON payload, or None when the body is absent/invalid."""
        if not self.body:
            if "body" in self.info:
                # Error responses carry their body inside the info dict.
                return json.loads(self.info["body"])
            return None
        try:
            return json.loads(self.body)
        except ValueError:
            return None

    @property
    def status_code(self):
        """HTTP status code of the response."""
        return self.info["status"]

    @property
    def ok(self):
        """True when the request ended with a 2xx success status."""
        return self.status_code in (200, 201, 202, 204)
|
||||
class Online(object):
    """Small HTTP client for the Online.net API.

    Wraps Ansible's ``fetch_url`` with the authentication header, a
    descriptive User-Agent, and one helper method per HTTP verb.
    """

    def __init__(self, module):
        self.module = module
        # Default headers sent with every request.
        self.headers = {
            'Authorization': "Bearer %s" % self.module.params.get('api_token'),
            'User-Agent': self.get_user_agent_string(module),
            'Content-type': 'application/json',
        }
        # Name of the API collection (e.g. 'servers'), set by subclasses.
        self.name = None

    def get_resources(self):
        """Return the decoded JSON listing for ``self.name``.

        :raises OnlineException: when the API answers with an error status.
        """
        results = self.get('/%s' % self.name)
        if not results.ok:
            raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format(
                self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
                results.status_code, results.json['message']
            ))

        return results.json

    def _url_builder(self, path):
        """Join ``path`` onto the configured API base URL."""
        if path[0] == '/':
            path = path[1:]
        return '%s/%s' % (self.module.params.get('api_url'), path)

    def send(self, method, path, data=None, headers=None):
        """Send one request and return a :class:`Response`.

        :arg str method: HTTP verb to use.
        :arg str path: path relative to the API base URL.
        :arg data: optional payload, serialized to JSON.
        :arg dict headers: optional extra headers for this request only.
        """
        url = self._url_builder(path)
        data = self.module.jsonify(data)

        # Merge per-request headers into a copy. Previously this updated
        # self.headers in place, so one call's extra headers leaked into
        # every subsequent request made by this client.
        request_headers = self.headers.copy()
        if headers is not None:
            request_headers.update(headers)

        resp, info = fetch_url(
            self.module, url, data=data, headers=request_headers, method=method,
            timeout=self.module.params.get('api_timeout')
        )

        # Exceptions in fetch_url may result in a status -1, this ensures a proper error to the user in all cases
        if info['status'] == -1:
            self.module.fail_json(msg=info['msg'])

        return Response(resp, info)

    @staticmethod
    def get_user_agent_string(module):
        """Return a User-Agent advertising the Ansible and Python versions."""
        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])

    def get(self, path, data=None, headers=None):
        return self.send('GET', path, data, headers)

    def put(self, path, data=None, headers=None):
        return self.send('PUT', path, data, headers)

    def post(self, path, data=None, headers=None):
        return self.send('POST', path, data, headers)

    def delete(self, path, data=None, headers=None):
        return self.send('DELETE', path, data, headers)

    def patch(self, path, data=None, headers=None):
        return self.send("PATCH", path, data, headers)

    def update(self, path, data=None, headers=None):
        # NOTE: 'UPDATE' is not a standard HTTP verb, but it is kept as-is
        # for backward compatibility with existing callers.
        return self.send("UPDATE", path, data, headers)
|
@ -0,0 +1,313 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2018 www.privaz.io Valletech AB
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import time
|
||||
import ssl
|
||||
from os import environ
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
HAS_PYONE = True
|
||||
|
||||
try:
|
||||
from pyone import OneException
|
||||
from pyone.server import OneServer
|
||||
except ImportError:
|
||||
OneException = Exception
|
||||
HAS_PYONE = False
|
||||
|
||||
|
||||
class OpenNebulaModule:
    """
    Base class for all OpenNebula Ansible Modules.
    This is basically a wrapper of the common arguments, the pyone client and
    some utility methods.
    """

    # Connection arguments shared by every OpenNebula module; each value
    # falls back to the matching ONE_* environment variable when set.
    common_args = dict(
        api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
        api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
        api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
        validate_certs=dict(default=True, type='bool'),
        wait_timeout=dict(type='int', default=300),
    )

    def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None):
        """Build the AnsibleModule with the merged spec, open the XML-RPC
        client, and resolve name parameters into their IDs."""
        module_args = OpenNebulaModule.common_args.copy()
        module_args.update(argument_spec)

        self.module = AnsibleModule(argument_spec=module_args,
                                    supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive,
                                    required_one_of=required_one_of,
                                    required_if=required_if)
        self.result = dict(changed=False,
                           original_message='',
                           message='')
        self.one = self.create_one_client()

        self.resolved_parameters = self.resolve_parameters()

    def create_one_client(self):
        """
        Creates an XML-RPC client to OpenNebula.

        Returns: the new xmlrpc client.

        """

        # context required for not validating SSL, old python versions won't validate anyway.
        if hasattr(ssl, '_create_unverified_context'):
            no_ssl_validation_context = ssl._create_unverified_context()
        else:
            no_ssl_validation_context = None

        # Check if the module can run
        if not HAS_PYONE:
            self.fail("pyone is required for this module")

        if self.module.params.get("api_url"):
            url = self.module.params.get("api_url")
        else:
            self.fail("Either api_url or the environment variable ONE_URL must be provided")

        if self.module.params.get("api_username"):
            username = self.module.params.get("api_username")
        else:
            # Fixed typo in the error message ("vairable").
            self.fail("Either api_username or the environment variable ONE_USERNAME must be provided")

        if self.module.params.get("api_password"):
            password = self.module.params.get("api_password")
        else:
            # Fixed typo in the error message ("vairable").
            self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided")

        session = "%s:%s" % (username, password)

        if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
            return OneServer(url, session=session, context=no_ssl_validation_context)
        else:
            return OneServer(url, session)

    def close_one_client(self):
        """
        Close the pyone session.
        """
        self.one.server_close()

    def fail(self, msg):
        """
        Utility failure method, will ensure pyone is properly closed before failing.
        Args:
            msg: human readable failure reason.
        """
        if hasattr(self, 'one'):
            self.close_one_client()
        self.module.fail_json(msg=msg)

    def exit(self):
        """
        Utility exit method, will ensure pyone is properly closed before exiting.

        """
        if hasattr(self, 'one'):
            self.close_one_client()
        self.module.exit_json(**self.result)

    def resolve_parameters(self):
        """
        This method resolves parameters provided by a secondary ID to the primary ID.
        For example if cluster_name is present, cluster_id will be introduced by performing
        the required resolution

        Returns: a copy of the parameters that includes the resolved parameters.

        """

        resolved_params = dict(self.module.params)

        if 'cluster_name' in self.module.params:
            clusters = self.one.clusterpool.info()
            for cluster in clusters.CLUSTER:
                if cluster.NAME == self.module.params.get('cluster_name'):
                    resolved_params['cluster_id'] = cluster.ID

        return resolved_params

    def is_parameter(self, name):
        """
        Utility method to check if a parameter was provided or is resolved
        Args:
            name: the parameter to check
        """
        if name in self.resolved_parameters:
            return self.get_parameter(name) is not None
        else:
            return False

    def get_parameter(self, name):
        """
        Utility method for accessing parameters that includes resolved ID
        parameters from provided Name parameters.
        """
        return self.resolved_parameters.get(name)

    def get_host_by_name(self, name):
        '''
        Returns a host given its name.
        Args:
            name: the name of the host

        Returns: the host object or None if the host is absent.

        '''
        hosts = self.one.hostpool.info()
        for h in hosts.HOST:
            if h.NAME == name:
                return h
        return None

    def get_cluster_by_name(self, name):
        """
        Returns a cluster given its name.
        Args:
            name: the name of the cluster

        Returns: the cluster object or None if the cluster is absent.
        """

        clusters = self.one.clusterpool.info()
        for c in clusters.CLUSTER:
            if c.NAME == name:
                return c
        return None

    def get_template_by_name(self, name):
        '''
        Returns a template given its name.
        Args:
            name: the name of the template

        Returns: the template object or None if the template is absent.

        '''
        templates = self.one.templatepool.info()
        for t in templates.TEMPLATE:
            if t.NAME == name:
                return t
        return None

    def cast_template(self, template):
        """
        OpenNebula handles all template elements as strings
        At some point there is a cast being performed on types provided by the user
        This function mimics that transformation so that required template updates are detected properly
        additionally an array will be converted to a comma separated list,
        which works for labels and hopefully for something more.

        Args:
            template: the template to transform (mutated in place)

        Returns: the transformed template with data casts applied.
        """

        # TODO: check formally available data types in templates
        # TODO: some arrays might be converted to space separated

        for key in template:
            value = template[key]
            if isinstance(value, dict):
                self.cast_template(template[key])
            elif isinstance(value, list):
                template[key] = ', '.join(value)
            elif not isinstance(value, string_types):
                template[key] = str(value)

    def requires_template_update(self, current, desired):
        """
        This function will help decide if a template update is required or not
        If a desired key is missing from the current dictionary an update is required
        If the intersection of both dictionaries is not deep equal, an update is required
        Args:
            current: current template as a dictionary
            desired: desired template as a dictionary

        Returns: True if a template update is required
        """

        if not desired:
            return False

        self.cast_template(desired)
        intersection = dict()
        for dkey in desired.keys():
            if dkey in current.keys():
                intersection[dkey] = current[dkey]
            else:
                # A desired key absent from the current template always
                # forces an update.
                return True
        return not (desired == intersection)

    def wait_for_state(self, element_name, state, state_name, target_states,
                       invalid_states=None, transition_states=None,
                       wait_timeout=None):
        """
        Polls until the element reaches one of the target states.

        Args:
            element_name: the name of the object we are waiting for: HOST, VM, etc.
            state: lambda that returns the current state, will be queried until target state is reached
            state_name: lambda that returns the readable form of a given state
            target_states: states expected to be reached
            invalid_states: if any of this states is reached, fail
            transition_states: when used, these are the valid states during the transition.
            wait_timeout: timeout period in seconds. Defaults to the provided parameter.
        """

        if not wait_timeout:
            wait_timeout = self.module.params.get("wait_timeout")

        start_time = time.time()

        while (time.time() - start_time) < wait_timeout:
            current_state = state()

            # Guard against invalid_states=None: the previous unconditional
            # membership test raised TypeError when the default was used.
            if invalid_states and current_state in invalid_states:
                self.fail('invalid %s state %s' % (element_name, state_name(current_state)))

            if transition_states:
                if current_state not in transition_states:
                    self.fail('invalid %s transition state %s' % (element_name, state_name(current_state)))

            if current_state in target_states:
                return True

            time.sleep(self.one.server_retry_interval())

        self.fail(msg="Wait timeout has expired!")

    def run_module(self):
        """
        trigger the start of the execution of the module.
        Returns:

        """
        try:
            self.run(self.one, self.module, self.result)
        except OneException as e:
            self.fail(msg="OpenNebula Exception: %s" % e)

    def run(self, one, module, result):
        """
        to be implemented by subclass with the actual module actions.
        Args:
            one: the OpenNebula XMLRPC client
            module: the Ansible Module object
            result: the Ansible result
        """
        raise NotImplementedError("Method requires implementation")
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,100 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2020, Tristan Le Guern <tleguern at bouledef.eu>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import atexit
|
||||
import time
|
||||
import re
|
||||
import traceback
|
||||
|
||||
PROXMOXER_IMP_ERR = None
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
PROXMOXER_IMP_ERR = traceback.format_exc()
|
||||
|
||||
|
||||
from ansible.module_utils.basic import env_fallback, missing_required_lib
|
||||
|
||||
|
||||
def proxmox_auth_argument_spec():
    """Return the argument spec for Proxmox VE API authentication options."""
    return dict(
        api_host=dict(type='str', required=True, fallback=(env_fallback, ['PROXMOX_HOST'])),
        api_user=dict(type='str', required=True, fallback=(env_fallback, ['PROXMOX_USER'])),
        api_password=dict(type='str', no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])),
        api_token_id=dict(type='str', no_log=False),
        api_token_secret=dict(type='str', no_log=True),
        validate_certs=dict(type='bool', default=False),
    )
|
||||
|
||||
def proxmox_to_ansible_bool(value):
    '''Convert Proxmox representation of a boolean to be ansible-friendly'''
    # Proxmox encodes booleans as the integers 0/1; the comparison itself
    # yields the Python bool.
    return value == 1
|
||||
|
||||
def ansible_to_proxmox_bool(value):
    '''Convert Ansible representation of a boolean to be proxmox-friendly'''
    # None passes through untouched so "option not set" is preserved.
    if value is None:
        return None

    if isinstance(value, bool):
        # Proxmox expects the integers 1/0 instead of true/false.
        return int(value)

    raise ValueError("%s must be of type bool not %s" % (value, type(value)))
|
||||
|
||||
class ProxmoxAnsible(object):
    """Base class for Proxmox modules"""

    def __init__(self, module):
        # Abort early when the proxmoxer client library is missing.
        if not HAS_PROXMOXER:
            module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)

        self.module = module
        self.proxmox_api = self._connect()
        # Test token validity
        try:
            self.proxmox_api.version.get()
        except Exception as e:
            module.fail_json(msg='%s' % e, exception=traceback.format_exc())

    def _connect(self):
        """Open a ProxmoxAPI connection, using password or API-token auth."""
        params = self.module.params
        auth_args = {'user': params['api_user']}

        if params['api_password']:
            auth_args['password'] = params['api_password']
        else:
            # No password given: fall back to API-token authentication.
            auth_args['token_name'] = params['api_token_id']
            auth_args['token_value'] = params['api_token_secret']

        try:
            return ProxmoxAPI(params['api_host'], verify_ssl=params['validate_certs'], **auth_args)
        except Exception as e:
            self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
|
@ -0,0 +1,112 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
HAS_PURESTORAGE = True
|
||||
try:
|
||||
from purestorage import purestorage
|
||||
except ImportError:
|
||||
HAS_PURESTORAGE = False
|
||||
|
||||
HAS_PURITY_FB = True
|
||||
try:
|
||||
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest
|
||||
except ImportError:
|
||||
HAS_PURITY_FB = False
|
||||
|
||||
from functools import wraps
|
||||
from os import environ
|
||||
from os import path
|
||||
import platform
|
||||
|
||||
VERSION = 1.2
|
||||
USER_AGENT_BASE = 'Ansible'
|
||||
API_AGENT_VERSION = 1.5
|
||||
|
||||
|
||||
def get_system(module):
    """Return System Object or Fail"""
    agent_info = {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform(),
    }
    user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % agent_info

    array_name = module.params['fa_url']
    api = module.params['api_token']

    # Prefer explicit module arguments; fall back to environment variables.
    if array_name and api:
        system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent)
    else:
        env_url = environ.get('PUREFA_URL')
        env_api = environ.get('PUREFA_API')
        if env_url and env_api:
            system = purestorage.FlashArray(env_url, api_token=env_api, user_agent=user_agent)
        else:
            module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")

    # Verify the credentials actually work before handing the client back.
    try:
        system.get()
    except Exception:
        module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
    return system
|
||||
|
||||
def get_blade(module):
    """Return System Object or Fail.

    Builds a PurityFb client from either the fb_url/api_token module
    arguments or the PUREFB_URL/PUREFB_API environment variables, and
    fails the module when neither pair is available.
    """
    user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform()
    }
    blade_name = module.params['fb_url']
    api = module.params['api_token']

    # Prefer explicit module arguments; fall back to environment variables.
    # The previously duplicated connect/login logic lives in _login_blade.
    if blade_name and api:
        blade = _login_blade(module, blade_name, api, user_agent)
    elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
        blade = _login_blade(module, environ.get('PUREFB_URL'), environ.get('PUREFB_API'), user_agent)
    else:
        module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments")
    return blade


def _login_blade(module, blade_url, api_token, user_agent):
    """Create a PurityFb client, authenticate it, and set the user agent.

    Fails the module when authentication is rejected by the FlashBlade.
    """
    blade = PurityFb(blade_url)
    blade.disable_verify_ssl()
    try:
        blade.login(api_token)
        versions = blade.api_version.list_versions().versions
        # Only newer API versions accept a custom user agent.
        if API_AGENT_VERSION in versions:
            blade._api_client.user_agent = user_agent
    except rest.ApiException:
        module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
    return blade
|
||||
|
||||
def purefa_argument_spec():
    """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
    return {
        'fa_url': {},
        'api_token': {'no_log': True},
    }
|
||||
|
||||
def purefb_argument_spec():
    """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
    return {
        'fb_url': {},
        'api_token': {'no_log': True},
    }
|
@ -0,0 +1,316 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by
|
||||
# Ansible still belong to the author of the module, and may assign their own
|
||||
# license to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import os
|
||||
import re
|
||||
from uuid import UUID
|
||||
|
||||
from ansible.module_utils.six import text_type, binary_type
|
||||
|
||||
# Server statuses that terminate a build/wait loop.
FINAL_STATUSES = ('ACTIVE', 'ERROR')
# Possible states reported for a Cloud Block Storage volume.
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
                 'error', 'error_deleting')

# Load-balancing algorithms accepted by Cloud Load Balancers.
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
                  'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
# Protocols accepted by Cloud Load Balancers.
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
                 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
                 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']

# Types treated as plain data (not callables) when flattening pyrax objects
# in rax_to_dict below.
NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
# Well-known Rackspace network UUIDs for PublicNet and ServiceNet.
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
|
||||
|
||||
def rax_slugify(value):
    """Prepend a key with rax_ and normalize the key name"""
    # Replace anything that is not a word char or hyphen, lower-case,
    # and drop any leading underscores before prefixing.
    normalized = re.sub(r'[^\w-]', '_', value).lower().lstrip('_')
    return 'rax_%s' % normalized
|
||||
|
||||
def rax_clb_node_to_dict(obj):
    """Function to convert a CLB Node object to a dict"""
    if not obj:
        return {}
    # Start from the object's own dict form, then set id and weight
    # explicitly from the attributes, matching the original behavior.
    node = obj.to_dict()
    node.update(id=obj.id, weight=obj.weight)
    return node
|
||||
|
||||
def rax_to_dict(obj, obj_type='standard'):
    """Generic function to convert a pyrax object to a dict

    obj_type values:
        standard
        clb
        server

    """
    instance = {}
    # Walk every attribute name on the object; plain data attributes are
    # copied, container attributes are converted recursively.
    for key in dir(obj):
        value = getattr(obj, key)
        if obj_type == 'clb' and key == 'nodes':
            # CLB nodes get their own conversion so id/weight are included.
            instance[key] = []
            for node in value:
                instance[key].append(rax_clb_node_to_dict(node))
        elif (isinstance(value, list) and len(value) > 0 and
                not isinstance(value[0], NON_CALLABLES)):
            # A non-empty list of pyrax objects: convert each one recursively.
            instance[key] = []
            for item in value:
                instance[key].append(rax_to_dict(item))
        elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
            if obj_type == 'server':
                if key == 'image':
                    # A server without an image attribute was presumably
                    # booted from a volume — TODO confirm against pyrax docs.
                    if not value:
                        instance['rax_boot_source'] = 'volume'
                    else:
                        instance['rax_boot_source'] = 'local'
                # Server keys are normalized with the rax_ prefix.
                key = rax_slugify(key)
            instance[key] = value

    if obj_type == 'server':
        # Expose the original attribute names alongside the slugified ones.
        for attr in ['id', 'accessIPv4', 'name', 'status']:
            instance[attr] = instance.get(rax_slugify(attr))

    return instance
|
||||
|
||||
def rax_find_bootable_volume(module, rax_module, server, exit=True):
    """Find a servers bootable volume"""
    cs = rax_module.cloudservers
    cbs = rax_module.cloud_blockstorage
    server_id = rax_module.utils.get_id(server)

    # Collect every attached volume that is flagged as bootable.
    bootable_volumes = []
    for volume in cs.volumes.get_server_volumes(server_id):
        vol = cbs.get(volume)
        if module.boolean(vol.bootable):
            bootable_volumes.append(vol)

    if not bootable_volumes:
        if not exit:
            return False
        module.fail_json(msg='No bootable volumes could be found for '
                             'server %s' % server_id)
    elif len(bootable_volumes) > 1:
        if not exit:
            return False
        module.fail_json(msg='Multiple bootable volumes found for server '
                             '%s' % server_id)

    return bootable_volumes[0]
|
||||
|
||||
def rax_find_image(module, rax_module, image, exit=True):
    """Find a server image by ID or Name"""
    cs = rax_module.cloudservers
    try:
        # A value that parses as a UUID is already an image ID.
        UUID(image)
    except ValueError:
        # Not an ID: look the image up by human_id first, then by name.
        try:
            image = cs.images.find(human_id=image)
        except(cs.exceptions.NotFound,
               cs.exceptions.NoUniqueMatch):
            try:
                image = cs.images.find(name=image)
            except (cs.exceptions.NotFound,
                    cs.exceptions.NoUniqueMatch):
                if not exit:
                    return False
                module.fail_json(msg='No matching image found (%s)' % image)

    return rax_module.utils.get_id(image)
|
||||
|
||||
def rax_find_volume(module, rax_module, name):
    """Find a Block storage volume by ID or name"""
    cbs = rax_module.cloud_blockstorage
    try:
        # If the name parses as a UUID, fetch the volume directly by ID.
        UUID(name)
        volume = cbs.get(name)
    except ValueError:
        # Not a UUID: fall back to a lookup by volume name.
        try:
            volume = cbs.find(name=name)
        except rax_module.exc.NotFound:
            # A missing volume is not fatal here; callers receive None.
            volume = None
        except Exception as e:
            module.fail_json(msg='%s' % e)
    return volume
|
||||
|
||||
def rax_find_network(module, rax_module, network):
    """Find a cloud network by ID or name.

    The literal names 'public' and 'private' (case-insensitive) map to the
    module-level PUBLIC_NET_ID / SERVICE_NET_ID constants.

    :returns: the result of cnw.get_server_networks() for the match
    """
    cnw = rax_module.cloud_networks
    try:
        # A UUID is used directly as the network ID (see the else branch).
        UUID(network)
    except ValueError:
        if network.lower() == 'public':
            return cnw.get_server_networks(PUBLIC_NET_ID)
        elif network.lower() == 'private':
            return cnw.get_server_networks(SERVICE_NET_ID)
        else:
            try:
                network_obj = cnw.find_network_by_label(network)
            except (rax_module.exceptions.NetworkNotFound,
                    rax_module.exceptions.NetworkLabelNotUnique):
                module.fail_json(msg='No matching network found (%s)' %
                                     network)
            else:
                return cnw.get_server_networks(network_obj)
    else:
        return cnw.get_server_networks(network)
|
||||
|
||||
|
||||
def rax_find_server(module, rax_module, server):
    """Find a Cloud Server by ID or name.

    :param server: a server UUID, or an exact server name (anchored ^name$)
    :returns: the server object; fails the module on zero or multiple name matches
    """
    cs = rax_module.cloudservers
    try:
        UUID(server)
        server = cs.servers.get(server)
    except ValueError:
        # Anchor the name so 'web' does not also match 'web2'.
        servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
        if not servers:
            module.fail_json(msg='No Server was matched by name, '
                                 'try using the Server ID instead')
        if len(servers) > 1:
            module.fail_json(msg='Multiple servers matched by name, '
                                 'try using the Server ID instead')

        # We made it this far, grab the first and hopefully only server
        # in the list
        server = servers[0]
    return server
|
||||
|
||||
|
||||
def rax_find_loadbalancer(module, rax_module, loadbalancer):
    """Find a Cloud Load Balancer by ID or name.

    :param loadbalancer: a load balancer ID (tried first) or an exact name
    :returns: the load balancer object; fails the module on zero or
              multiple name matches
    """
    clb = rax_module.cloud_loadbalancers
    try:
        # Try treating the value as an ID first.
        found = clb.get(loadbalancer)
    except Exception:
        # ID lookup failed; fall back to an exact-name scan of all LBs.
        found = []
        for lb in clb.list():
            if loadbalancer == lb.name:
                found.append(lb)

        if not found:
            module.fail_json(msg='No loadbalancer was matched')

        if len(found) > 1:
            module.fail_json(msg='Multiple loadbalancers matched')

        # We made it this far, grab the first and hopefully only item
        # in the list
        found = found[0]

    return found
|
||||
|
||||
|
||||
def rax_argument_spec():
    """Return the base ``argument_spec`` dict shared by all Rackspace modules.

    Callers pass this (usually after ``update()``-ing module-specific
    options) as the ``argument_spec`` argument to AnsibleModule.
    """
    return {
        'api_key': dict(type='str', aliases=['password'], no_log=True),
        'auth_endpoint': dict(type='str'),
        'credentials': dict(type='path', aliases=['creds_file']),
        'env': dict(type='str'),
        'identity_type': dict(type='str', default='rackspace'),
        'region': dict(type='str'),
        'tenant_id': dict(type='str'),
        'tenant_name': dict(type='str'),
        'username': dict(type='str'),
        'validate_certs': dict(type='bool', aliases=['verify_ssl']),
    }
|
||||
|
||||
|
||||
def rax_required_together():
    """Default ``required_together`` constraint for Rackspace modules.

    Authenticating with an API key only makes sense together with a
    username, so the two options must be supplied as a pair.
    """
    return [['api_key', 'username']]
|
||||
|
||||
|
||||
def setup_rax_module(module, rax_module, region_required=True):
    """Set up pyrax in a standard way for all modules.

    Reads connection/auth options from module.params (with RAX_* environment
    variables and pyrax settings as fallbacks), configures pyrax settings,
    authenticates, and validates the region.

    :param module: AnsibleModule instance
    :param rax_module: the pyrax module
    :param region_required: when True, fail unless the resolved region is in
                            rax_module.regions
    :returns: the configured rax_module
    """
    rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
                                               rax_module.USER_AGENT)

    api_key = module.params.get('api_key')
    auth_endpoint = module.params.get('auth_endpoint')
    credentials = module.params.get('credentials')
    env = module.params.get('env')
    identity_type = module.params.get('identity_type')
    region = module.params.get('region')
    tenant_id = module.params.get('tenant_id')
    tenant_name = module.params.get('tenant_name')
    username = module.params.get('username')
    verify_ssl = module.params.get('validate_certs')

    if env is not None:
        rax_module.set_environment(env)

    rax_module.set_setting('identity_type', identity_type)
    if verify_ssl is not None:
        rax_module.set_setting('verify_ssl', verify_ssl)
    if auth_endpoint is not None:
        rax_module.set_setting('auth_endpoint', auth_endpoint)
    if tenant_id is not None:
        rax_module.set_setting('tenant_id', tenant_id)
    if tenant_name is not None:
        rax_module.set_setting('tenant_name', tenant_name)

    try:
        username = username or os.environ.get('RAX_USERNAME')
        if not username:
            # A username stored in the keyring implies keyring-based auth.
            username = rax_module.get_setting('keyring_username')
            if username:
                api_key = 'USE_KEYRING'
        if not api_key:
            api_key = os.environ.get('RAX_API_KEY')
        credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
                       os.environ.get('RAX_CREDS_FILE'))
        region = (region or os.environ.get('RAX_REGION') or
                  rax_module.get_setting('region'))
    except KeyError as e:
        # BUG FIX: exceptions have no .message attribute on Python 3, so the
        # old '%s' % e.message raised AttributeError here.  KeyError always
        # carries the missing key as args[0].
        module.fail_json(msg='Unable to load %s' % e.args[0])

    try:
        if api_key and username:
            if api_key == 'USE_KEYRING':
                rax_module.keyring_auth(username, region=region)
            else:
                rax_module.set_credentials(username, api_key=api_key,
                                           region=region)
        elif credentials:
            credentials = os.path.expanduser(credentials)
            rax_module.set_credential_file(credentials, region=region)
        else:
            raise Exception('No credentials supplied!')
    except Exception as e:
        # BUG FIX: .message does not exist on Python 3 exceptions.  getattr()
        # preserves the Python 2 behaviour while Python 3 falls back to
        # repr(e) instead of raising AttributeError inside this handler.
        message = getattr(e, 'message', None)
        if message:
            msg = str(message)
        else:
            msg = repr(e)
        module.fail_json(msg=msg)

    if region_required and region not in rax_module.regions:
        module.fail_json(msg='%s is not a valid region, must be one of: %s' %
                             (region, ','.join(rax_module.regions)))

    return rax_module
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,271 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), James Laska
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
import types
|
||||
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
|
||||
|
||||
class RegistrationBase(object):
    """Common base class for Red Hat registration backends.

    Subclasses must implement configure/register/unregister/unsubscribe/
    subscribe; enable() and update_plugin_conf() provide shared yum
    plumbing.
    """

    def __init__(self, module, username=None, password=None):
        self.module = module
        self.username = username
        self.password = password

    def configure(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def enable(self):
        # Remove any existing redhat.repo so a stale repo file cannot linger
        # after registration state changes.
        redhat_repo = '/etc/yum.repos.d/redhat.repo'
        if os.path.isfile(redhat_repo):
            os.unlink(redhat_repo)

    def register(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unregister(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unsubscribe(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def update_plugin_conf(self, plugin, enabled=True):
        """Enable or disable a yum plugin by rewriting its config file.

        The file is edited on a temp copy and moved into place atomically.
        """
        plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin

        if os.path.isfile(plugin_conf):
            tmpfd, tmpfile = tempfile.mkstemp()
            # BUG FIX: the descriptor returned by mkstemp() was never closed
            # (the file was re-opened by path below), leaking one fd per call.
            os.close(tmpfd)
            shutil.copy2(plugin_conf, tmpfile)
            cfg = configparser.ConfigParser()
            cfg.read([tmpfile])

            # BUG FIX: Python 3's configparser only accepts string values;
            # passing the ints 1/0 raises TypeError.  The written output
            # ('enabled = 1') is unchanged.
            if enabled:
                cfg.set('main', 'enabled', '1')
            else:
                cfg.set('main', 'enabled', '0')

            fd = open(tmpfile, 'w+')
            cfg.write(fd)
            fd.close()
            self.module.atomic_move(tmpfile, plugin_conf)

    def subscribe(self, **kwargs):
        raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
|
||||
class Rhsm(RegistrationBase):
    """Registration backend driving the ``subscription-manager`` CLI."""

    def __init__(self, module, username=None, password=None):
        RegistrationBase.__init__(self, module, username, password)
        self.config = self._read_config()
        self.module = module

    def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
        '''
        Load RHSM configuration from /etc/rhsm/rhsm.conf.
        Returns:
            * ConfigParser object, extended with a get_option(key, default)
              helper that takes a dotted "<section>.<option>" key
        '''
        # Read RHSM defaults ...
        cp = configparser.ConfigParser()
        cp.read(rhsm_conf)

        # Add support for specifying a default value w/o having to standup
        # some configuration.  Yeah, I know this should be subclassed ... but, oh well
        def get_option_default(self, key, default=''):
            sect, opt = key.split('.', 1)
            if self.has_section(sect) and self.has_option(sect, opt):
                return self.get(sect, opt)
            else:
                return default

        # BUG FIX: the three-argument form types.MethodType(func, obj, cls)
        # is Python 2 only and raises TypeError on Python 3; the two-argument
        # form binds the function to the instance on both versions.
        cp.get_option = types.MethodType(get_option_default, cp)

        return cp

    def enable(self):
        '''
        Enable the system to receive updates from subscription-manager.
        This involves updating affected yum plugins and removing any
        conflicting yum repositories.
        '''
        RegistrationBase.enable(self)
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', True)

    def configure(self, **kwargs):
        '''
        Configure the system as directed for registration with RHN
        Raises:
            * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'config']

        # Pass supplied **kwargs as parameters to subscription-manager.
        # Only keys prefixed 'system_' or 'rhsm_' are forwarded, with '_'
        # replaced by '.'; e.g. 'rhsm_baseurl' becomes '--rhsm.baseurl'.
        for k, v in kwargs.items():
            if re.search(r'^(system|rhsm)_', k):
                args.append('--%s=%s' % (k.replace('_', '.'), v))

        self.module.run_command(args, check_rc=True)

    @property
    def is_registered(self):
        '''
        Whether the current system is registered, judged by the exit
        status of `subscription-manager identity`.
        Returns:
            * Boolean
        '''
        args = ['subscription-manager', 'identity']
        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
        if rc == 0:
            return True
        else:
            return False

    def register(self, username, password, autosubscribe, activationkey):
        '''
        Register the current system to the provided RHN server
        Raises:
            * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'register']

        # Generate command arguments
        if activationkey:
            # BUG FIX: run_command receives an argv list, not a shell string,
            # so the old single element '--activationkey "%s"' passed literal
            # quotes inside one argument; the key must be its own element.
            args.extend(['--activationkey', activationkey])
        else:
            if autosubscribe:
                args.append('--autosubscribe')
            if username:
                args.extend(['--username', username])
            if password:
                args.extend(['--password', password])

        # Do the needful...
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)

    def unsubscribe(self):
        '''
        Unsubscribe a system from all subscribed channels
        Raises:
            * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'unsubscribe', '--all']
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)

    def unregister(self):
        '''
        Unregister a currently registered system
        Raises:
            * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'unregister']
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)
        # Disable the RHSM yum plugins once the system is unregistered.
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', False)

    def subscribe(self, regexp):
        '''
        Subscribe current system to available pools matching the specified
        regular expression
        Raises:
            * Exception - if error occurs while running command
        '''
        # Available pools ready for subscription
        available_pools = RhsmPools(self.module)

        for pool in available_pools.filter(regexp):
            pool.subscribe()
|
||||
|
||||
|
||||
class RhsmPool(object):
    '''
    Convenience record for one RHSM subscription pool.

    Arbitrary keyword arguments become instance attributes; the parser in
    RhsmPools seeds each pool with `_name` and later attaches fields such
    as `PoolId` the same way.
    '''

    def __init__(self, module, **kwargs):
        self.module = module
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def __str__(self):
        # `_name` is always set by the creator (see RhsmPools).
        return str(getattr(self, '_name'))

    def subscribe(self):
        """Attach this pool via subscription-manager; True on success."""
        command = "subscription-manager subscribe --pool %s" % self.PoolId
        rc, out, err = self.module.run_command(command, check_rc=True)
        return rc == 0
|
||||
|
||||
|
||||
class RhsmPools(object):
    """
    This class is used for manipulating pools subscriptions with RHSM.
    Iterating the instance yields RhsmPool objects parsed from
    `subscription-manager list --available`.
    """
    def __init__(self, module):
        self.module = module
        self.products = self._load_product_list()

    def __iter__(self):
        return self.products.__iter__()

    def _load_product_list(self):
        """
        Loads list of all available pools for system in data structure.
        Parses the colon-separated "Key: value" blocks that
        subscription-manager prints, one block per pool.
        """
        args = "subscription-manager list --available"
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)

        products = []
        for line in stdout.split('\n'):
            # Remove leading+trailing whitespace
            line = line.strip()
            # An empty line implies the end of an output group
            if len(line) == 0:
                continue
            # If a colon ':' is found, parse
            elif ':' in line:
                (key, value) = line.split(':', 1)
                key = key.strip().replace(" ", "")  # To unify
                value = value.strip()
                if key in ['ProductName', 'SubscriptionName']:
                    # A name line starts a new pool record.
                    products.append(RhsmPool(self.module, _name=value, key=value))
                elif products:
                    # Associate value with most recently recorded product
                    products[-1].__setattr__(key, value)
                # FIXME - log some warning?
                # else:
                #     warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
        return products

    def filter(self, regexp='^$'):
        '''
        Return a generator of RhsmPools whose name matches the provided
        regular expression (re.search semantics).
        '''
        r = re.compile(regexp)
        for product in self.products:
            if r.search(product._name):
                yield product
|
@ -0,0 +1,93 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
REDIS_IMP_ERR = None
|
||||
try:
|
||||
from redis import Redis
|
||||
from redis import __version__ as redis_version
|
||||
HAS_REDIS_PACKAGE = True
|
||||
except ImportError:
|
||||
REDIS_IMP_ERR = traceback.format_exc()
|
||||
HAS_REDIS_PACKAGE = False
|
||||
|
||||
try:
|
||||
import certifi
|
||||
HAS_CERTIFI_PACKAGE = True
|
||||
except ImportError:
|
||||
CERTIFI_IMPORT_ERROR = traceback.format_exc()
|
||||
HAS_CERTIFI_PACKAGE = False
|
||||
|
||||
|
||||
def fail_imports(module):
    """Fail the module when the required third-party libraries are missing.

    Collects a missing_required_lib() message plus the captured import
    traceback for each absent package (redis, certifi) and calls
    module.fail_json once with everything gathered.
    """
    errors = []
    # IDIOM FIX: the local list was previously named 'traceback', shadowing
    # the imported 'traceback' module within this function.
    tracebacks = []
    if not HAS_REDIS_PACKAGE:
        errors.append(missing_required_lib('redis'))
        tracebacks.append(REDIS_IMP_ERR)
    if not HAS_CERTIFI_PACKAGE:
        errors.append(missing_required_lib('certifi'))
        tracebacks.append(CERTIFI_IMPORT_ERROR)
    if errors:
        module.fail_json(errors=errors, traceback='\n'.join(tracebacks))
|
||||
|
||||
|
||||
def redis_auth_argument_spec():
    """Return the shared connection/auth argument_spec for Redis modules."""
    return {
        'login_host': dict(type='str', default='localhost'),
        'login_user': dict(type='str'),
        'login_password': dict(type='str', no_log=True),
        'login_port': dict(type='int', default=6379),
        'tls': dict(type='bool', default=True),
        'validate_certs': dict(type='bool', default=True),
        'ca_certs': dict(type='str'),
    }
|
||||
|
||||
|
||||
class RedisAnsible(object):
    '''Base class for Redis modules: builds a redis.Redis connection from
    the module's connection parameters.'''

    def __init__(self, module):
        self.module = module
        self.connection = self._connect()

    def _connect(self):
        """Create and return a Redis client, or fail the module.

        Fails the module when redis-py is older than 3.4.0 and a username
        was supplied, or when the Redis constructor raises.
        """
        login_host = self.module.params['login_host']
        login_user = self.module.params['login_user']
        login_password = self.module.params['login_password']
        login_port = self.module.params['login_port']
        tls = self.module.params['tls']
        # redis-py expects 'required'/None for ssl_cert_reqs.
        validate_certs = 'required' if self.module.params['validate_certs'] else None
        ca_certs = self.module.params['ca_certs']
        if tls and ca_certs is None:
            # No CA bundle supplied: fall back to certifi's bundle.
            ca_certs = str(certifi.where())
        # ACL-style usernames require redis-py >= 3.4.0.
        if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
            # BUG FIX: corrected message typo ("in only" -> "is only").
            self.module.fail_json(
                msg='The option `username` is only supported with redis >= 3.4.0.')
        params = {'host': login_host,
                  'port': login_port,
                  'password': login_password,
                  'ssl_ca_certs': ca_certs,
                  'ssl_cert_reqs': validate_certs,
                  'ssl': tls}
        if login_user is not None:
            params['username'] = login_user
        try:
            return Redis(**params)
        except Exception as e:
            self.module.fail_json(msg='{0}'.format(str(e)))
        return None
|
@ -0,0 +1,79 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by
|
||||
# Ansible still belong to the author of the module, and may assign their
|
||||
# own license to the complete work.
|
||||
#
|
||||
# Copyright (C) 2017 Lenovo, Inc.
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
#
|
||||
# Contains LXCA common class
|
||||
# Lenovo xClarity Administrator (LXCA)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
try:
|
||||
from pylxca import connect, disconnect
|
||||
HAS_PYLXCA = True
|
||||
except ImportError:
|
||||
HAS_PYLXCA = False
|
||||
|
||||
|
||||
PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
|
||||
|
||||
|
||||
def has_pylxca(module):
    """
    Fail the module with PYLXCA_REQUIRED when the 'pylxca' package could
    not be imported (HAS_PYLXCA is set at import time); otherwise a no-op.

    :param module: AnsibleModule instance
    """
    if not HAS_PYLXCA:
        module.fail_json(msg=PYLXCA_REQUIRED)
|
||||
|
||||
|
||||
# Argument-spec fragment shared by all LXCA modules: credentials and the
# authentication endpoint, all mandatory.
LXCA_COMMON_ARGS = dict(
    login_user=dict(required=True),
    login_password=dict(required=True, no_log=True),
    auth_url=dict(required=True),
)
|
||||
|
||||
|
||||
class connection_object:
    """Context manager wrapping an LXCA connection.

    `with connection_object(module) as con:` opens the connection via
    setup_conn() and guarantees close_conn() runs on exit.
    """

    def __init__(self, module):
        self.module = module

    def __enter__(self):
        # Yields the pylxca connection returned by setup_conn().
        return setup_conn(self.module)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always disconnect; exceptions are not suppressed (implicit None).
        close_conn()
|
||||
|
||||
|
||||
def setup_conn(module):
    """
    this function create connection to LXCA
    :param module: AnsibleModule instance providing auth_url/login_user/
                   login_password params
    :return: lxca connection (fails the module on any connect error)
    """
    lxca_con = None
    try:
        # NOTE(review): pylxca's connect() 4th argument is passed as the
        # string "True" here — presumably a verify/force flag; confirm
        # against the pylxca documentation.
        lxca_con = connect(module.params['auth_url'],
                           module.params['login_user'],
                           module.params['login_password'],
                           "True")
    except Exception as exception:
        error_msg = '; '.join(exception.args)
        module.fail_json(msg=error_msg, exception=traceback.format_exc())
    return lxca_con
|
||||
|
||||
|
||||
def close_conn():
    """
    this function close connection to LXCA
    (delegates to pylxca's module-level disconnect())
    :return: None
    """
    disconnect()
|
@ -0,0 +1,94 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.urls import fetch_url, url_argument_spec
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
def api_argument_spec():
    '''
    Creates an argument spec that can be used with any module
    that will be requesting content via Rundeck API:
    the generic URL options plus url, api_version and api_token.
    '''
    # IDIOM FIX: the local previously shadowed this function's own name.
    argument_spec = url_argument_spec()
    argument_spec.update(dict(
        url=dict(required=True, type="str"),
        api_version=dict(type="int", default=39),
        api_token=dict(required=True, type="str", no_log=True)
    ))

    return argument_spec
|
||||
|
||||
|
||||
def api_request(module, endpoint, data=None, method="GET"):
    """Manages Rundeck API requests via HTTP(S)

    :arg module: The AnsibleModule (used to get url, api_version, api_token, etc).
    :arg endpoint: The API endpoint to be used.
    :kwarg data: The data to be sent (in case of POST/PUT).
    :kwarg method: "POST", "PUT", etc.

    :returns: A tuple of (**json_response**, **info**). The **info** contains
        the 'status' and other meta data. When a HttpError (status >= 400)
        occurred then ``info['body']`` contains the error response data::

    Example::

        data={...}
        resp, info = fetch_url(module,
                               "http://rundeck.example.org",
                               data=module.jsonify(data),
                               method="POST")
        status_code = info["status"]
        body = resp.read()
        if status_code >= 400 :
            body = info['body']
    """

    response, info = fetch_url(
        module=module,
        url="%s/api/%s/%s" % (
            module.params["url"],
            module.params["api_version"],
            endpoint
        ),
        # BUG FIX: json.dumps(None) is the literal body "null"; send no body
        # at all for body-less requests (GET/DELETE).
        data=json.dumps(data) if data is not None else None,
        method=method,
        headers={
            "Content-Type": "application/json",
            "Accept": "application/json",
            "X-Rundeck-Auth-Token": module.params["api_token"]
        }
    )

    # Known failure statuses: each fail_json call terminates the module.
    if info["status"] == 403:
        module.fail_json(msg="Token authorization failed",
                         execution_info=json.loads(info["body"]))
    if info["status"] == 409:
        module.fail_json(msg="Job executions limit reached",
                         execution_info=json.loads(info["body"]))
    elif info["status"] >= 500:
        module.fail_json(msg="Rundeck API error",
                         execution_info=json.loads(info["body"]))

    try:
        content = response.read()
        json_response = json.loads(content)
        return json_response, info
    except AttributeError as error:
        # response is None when fetch_url itself failed.
        module.fail_json(msg="Rundeck API request error",
                         exception=to_native(error),
                         execution_info=info)
    except ValueError as error:
        # Body was not valid JSON.
        module.fail_json(
            msg="No valid JSON response",
            exception=to_native(error),
            execution_info=content
        )
|
@ -0,0 +1,178 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
|
||||
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from stringprep import (
|
||||
in_table_a1,
|
||||
in_table_b1,
|
||||
in_table_c3,
|
||||
in_table_c4,
|
||||
in_table_c5,
|
||||
in_table_c6,
|
||||
in_table_c7,
|
||||
in_table_c8,
|
||||
in_table_c9,
|
||||
in_table_c12,
|
||||
in_table_c21_c22,
|
||||
in_table_d1,
|
||||
in_table_d2,
|
||||
)
|
||||
from unicodedata import normalize
|
||||
|
||||
from ansible.module_utils.six import text_type
|
||||
|
||||
|
||||
def is_unicode_str(string):
    """Return True when *string* is a text (unicode) string.

    text_type is str on Python 3 and unicode on Python 2 (from six).
    """
    # IDIOM FIX: return the boolean directly instead of
    # `True if ... else False`.
    return isinstance(string, text_type)
|
||||
|
||||
|
||||
def mapping_profile(string):
    """RFC4013 Mapping profile implementation.

    Per RFC 4013 section 2.1 this step:
    - maps non-ASCII space characters [StringPrep, C.1.2] to SPACE (U+0020)
    - drops the "commonly mapped to nothing" characters [StringPrep, B.1]
    """
    mapped = []
    for ch in string:
        if in_table_b1(ch):
            # Commonly mapped to nothing: drop the character entirely.
            continue
        # Replace mappable non-ASCII spaces with U+0020; keep others as-is.
        mapped.append(u' ' if in_table_c12(ch) else ch)
    return u"".join(mapped)
|
||||
|
||||
|
||||
def is_ral_string(string):
    """RFC3454 Check bidirectional category of the string.

    Table D.1 lists the characters of Unicode bidirectional categories
    "R" and "AL" (RandALCat).  RFC 3454 requires that a string containing
    any RandALCat character both start AND end with one.

    :returns: True when the string starts with a RandALCat character,
              False otherwise
    :raises ValueError: when the string starts with a RandALCat character
                        but does not end with one
    """
    if not in_table_d1(string[0]):
        return False
    # First char is RandALCat; the last char must be too.
    if not in_table_d1(string[-1]):
        raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
    return True
|
||||
|
||||
|
||||
def prohibited_output_profile(string):
    """RFC4013 Prohibited output profile implementation.

    Validates only; returns nothing.

    :raises ValueError: on the first character that violates any of the
        RFC 4013 sections 2.3 (prohibited output), 2.4 (bidirectional),
        or 2.5 (unassigned code points) rules.
    """
    # Implements:
    # RFC4013, 2.3. Prohibited Output.
    # This profile specifies the following characters as prohibited input:
    #   - Non-ASCII space characters [StringPrep, C.1.2]
    #   - ASCII control characters [StringPrep, C.2.1]
    #   - Non-ASCII control characters [StringPrep, C.2.2]
    #   - Private Use characters [StringPrep, C.3]
    #   - Non-character code points [StringPrep, C.4]
    #   - Surrogate code points [StringPrep, C.5]
    #   - Inappropriate for plain text characters [StringPrep, C.6]
    #   - Inappropriate for canonical representation characters [StringPrep, C.7]
    #   - Change display properties or deprecated characters [StringPrep, C.8]
    #   - Tagging characters [StringPrep, C.9]
    # RFC4013, 2.4. Bidirectional Characters.
    # RFC4013, 2.5. Unassigned Code Points.

    # Determine how to handle bidirectional characters (RFC3454):
    if is_ral_string(string):
        # If a string contains any RandALCat characters,
        # The string MUST NOT contain any LCat character:
        is_prohibited_bidi_ch = in_table_d2
        bidi_table = 'D.2'
    else:
        # Forbid RandALCat characters in LCat string:
        is_prohibited_bidi_ch = in_table_d1
        bidi_table = 'D.1'

    RFC = 'RFC4013'
    for c in string:
        # RFC4013 2.3. Prohibited Output:
        if in_table_c12(c):
            raise ValueError('%s: prohibited non-ASCII space characters '
                             'that cannot be replaced (C.1.2).' % RFC)
        if in_table_c21_c22(c):
            raise ValueError('%s: prohibited control characters (C.2.1).' % RFC)
        if in_table_c3(c):
            raise ValueError('%s: prohibited private Use characters (C.3).' % RFC)
        if in_table_c4(c):
            raise ValueError('%s: prohibited non-character code points (C.4).' % RFC)
        if in_table_c5(c):
            raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC)
        if in_table_c6(c):
            raise ValueError('%s: prohibited inappropriate for plain text '
                             'characters (C.6).' % RFC)
        if in_table_c7(c):
            raise ValueError('%s: prohibited inappropriate for canonical '
                             'representation characters (C.7).' % RFC)
        if in_table_c8(c):
            raise ValueError('%s: prohibited change display properties / '
                             'deprecated characters (C.8).' % RFC)
        if in_table_c9(c):
            raise ValueError('%s: prohibited tagging characters (C.9).' % RFC)

        # RFC4013, 2.4. Bidirectional Characters:
        if is_prohibited_bidi_ch(c):
            raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table))

        # RFC4013, 2.5. Unassigned Code Points:
        if in_table_a1(c):
            raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC)
|
||||
|
||||
|
||||
def saslprep(string):
    """RFC4013 implementation.
    Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454)
    to prepare Unicode strings representing user names and passwords for comparison.
    Regarding the RFC4013, the "SASLprep" profile is intended to be used by
    Simple Authentication and Security Layer (SASL) mechanisms
    (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols
    exchanging simple user names and/or passwords.

    Args:
        string (unicode string): Unicode string to validate and prepare.

    Returns:
        Prepared unicode string.

    Raises:
        TypeError: when the input is not a unicode string.
        ValueError: when the string contains characters prohibited by
            RFC 4013 (via prohibited_output_profile).
    """
    # RFC4013: "The algorithm assumes all strings are
    # comprised of characters from the Unicode [Unicode] character set."
    # Validate the string is a Unicode string
    # (text_type is the string type if PY3 and unicode otherwise):
    if not is_unicode_str(string):
        raise TypeError('input must be of type %s, not %s' % (text_type, type(string)))

    # RFC4013: 2.1. Mapping.
    string = mapping_profile(string)

    # RFC4013: 2.2. Normalization.
    # "This profile specifies using Unicode normalization form KC."
    string = normalize('NFKC', string)
    if not string:
        # Everything was mapped away; nothing left to validate.
        return u''

    # RFC4013: 2.3. Prohibited Output.
    # RFC4013: 2.4. Bidirectional Characters.
    # RFC4013: 2.5. Unassigned Code Points.
    prohibited_output_profile(string)

    return string
|
@ -0,0 +1,196 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
|
||||
|
||||
def scaleway_argument_spec():
    """Return the base argument_spec shared by all Scaleway modules.

    The API token and URL fall back to the SCW_* environment variables
    via env_fallback.
    """
    return dict(
        api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
                       no_log=True, aliases=['oauth_token']),
        api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
        api_timeout=dict(type='int', default=30, aliases=['timeout']),
        query_parameters=dict(type='dict', default={}),
        validate_certs=dict(default=True, type='bool'),
    )
|
||||
|
||||
|
||||
def payload_from_object(scw_object):
    """Build an API payload from an object dict.

    Drops the 'id' key and every key whose value is None, so only
    meaningful fields are sent to the Scaleway API.
    """
    return {
        key: value
        for key, value in scw_object.items()
        if key != 'id' and value is not None
    }
|
||||
|
||||
|
||||
class ScalewayException(Exception):
    """Raised when the Scaleway API returns an error or an unexpected response."""

    def __init__(self, message):
        # Forward the message to Exception so str(exc) and exc.args carry it
        # (previously Exception.__init__ was never called and str(exc) was '').
        super(ScalewayException, self).__init__(message)
        # Kept for backward compatibility with callers reading .message.
        self.message = message
|
||||
|
||||
|
||||
# Specify a complete Link header, for validation purposes
R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
(,<[^>]+>;\srel="(first|previous|next|last)")*'''
# Specify a single relation, for iteration and string extraction purposes
R_RELATION = r'</?(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'


def parse_pagination_link(header):
    """Parse a Scaleway 'Link' pagination header into a dict.

    Returns a mapping of relation name ('first', 'previous', 'next',
    'last') to its target URI.  Raises ScalewayException when the header
    as a whole, or any individual relation, does not match the expected
    format.
    """
    # Validate the overall shape first (R_LINK_HEADER is a VERBOSE pattern).
    if not re.match(R_LINK_HEADER, header, re.VERBOSE):
        raise ScalewayException('Scaleway API answered with an invalid Link pagination header')

    relation_re = re.compile(R_RELATION)
    parsed_relations = {}
    for relation_text in header.split(','):
        matched = relation_re.match(relation_text)
        if not matched:
            raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
        parsed_relations[matched.group('relation')] = matched.group('target_IRI')
    return parsed_relations
|
||||
|
||||
|
||||
class Response(object):
    """Lightweight wrapper around a fetch_url (response, info) pair."""

    def __init__(self, resp, info):
        # The response body can only be read once; capture it immediately.
        self.body = resp.read() if resp else None
        self.info = info

    @property
    def json(self):
        """Decoded JSON body, or None when it is absent or unparsable."""
        if self.body:
            try:
                return json.loads(self.body)
            except ValueError:
                return None
        # On errors fetch_url stores the payload in info['body'] instead.
        if "body" in self.info:
            return json.loads(self.info["body"])
        return None

    @property
    def status_code(self):
        """HTTP status code reported by fetch_url."""
        return self.info["status"]

    @property
    def ok(self):
        """True for the success statuses the Scaleway API uses."""
        return self.status_code in (200, 201, 202, 204)
|
||||
|
||||
|
||||
class Scaleway(object):
    """Thin HTTP client for the Scaleway REST API.

    Subclasses set ``self.name`` to the resource collection they manage
    (e.g. 'servers') and use the verb helpers (get/post/put/...) which all
    funnel through :meth:`send`.
    """

    def __init__(self, module):
        self.module = module
        self.headers = {
            'X-Auth-Token': self.module.params.get('api_token'),
            'User-Agent': self.get_user_agent_string(module),
            'Content-Type': 'application/json',
        }
        # Resource collection name; concrete subclasses assign this.
        self.name = None

    def get_resources(self):
        """Fetch and return the list of resources under ``self.name``.

        Raises ScalewayException with the API error message on failure.
        """
        results = self.get('/%s' % self.name)

        if not results.ok:
            raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
                self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
                results.status_code, results.json['message']
            ))

        return results.json.get(self.name)

    def _url_builder(self, path, params):
        """Join api_url, path and query parameters into a full request URL.

        ``params`` (a dict or None) is merged on top of the module's
        configured ``query_parameters``.
        """
        # Work on a copy: updating module.params['query_parameters'] in place
        # made parameters from one request leak into every later request.
        query_params = dict(self.module.params.get('query_parameters'))
        if params is not None:
            query_params.update(params)
        query_string = urlencode(query_params, doseq=True)

        if path[0] == '/':
            path = path[1:]
        return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)

    def send(self, method, path, data=None, headers=None, params=None):
        """Perform one API request and return it wrapped in a Response."""
        url = self._url_builder(path=path, params=params)
        self.warn(url)

        if headers is not None:
            self.headers.update(headers)

        # JSON-encode the payload unless a caller overrode the content type.
        if self.headers['Content-Type'] == "application/json":
            data = self.module.jsonify(data)

        resp, info = fetch_url(
            self.module, url, data=data, headers=self.headers, method=method,
            timeout=self.module.params.get('api_timeout')
        )

        # Exceptions in fetch_url may result in a status -1; this ensures a
        # proper error is reported to the user in all cases.
        if info['status'] == -1:
            self.module.fail_json(msg=info['msg'])

        return Response(resp, info)

    @staticmethod
    def get_user_agent_string(module):
        """Build the User-Agent header from the Ansible and Python versions."""
        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])

    def get(self, path, data=None, headers=None, params=None):
        """Send a GET request."""
        return self.send(method='GET', path=path, data=data, headers=headers, params=params)

    def put(self, path, data=None, headers=None, params=None):
        """Send a PUT request."""
        return self.send(method='PUT', path=path, data=data, headers=headers, params=params)

    def post(self, path, data=None, headers=None, params=None):
        """Send a POST request."""
        return self.send(method='POST', path=path, data=data, headers=headers, params=params)

    def delete(self, path, data=None, headers=None, params=None):
        """Send a DELETE request."""
        return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)

    def patch(self, path, data=None, headers=None, params=None):
        """Send a PATCH request."""
        return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)

    def update(self, path, data=None, headers=None, params=None):
        """Send an UPDATE request (non-standard verb kept for compatibility)."""
        return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params)

    def warn(self, x):
        """Surface *x* as an Ansible warning."""
        self.module.warn(str(x))
|
||||
|
||||
|
||||
# Location catalogue: maps both the short legacy zone slugs (e.g. 'par1')
# and the legacy 'EMEA-*' identifiers to the same current per-zone instance
# API endpoints, so callers may use either form interchangeably.
SCALEWAY_LOCATION = {
    'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
    'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},

    'par2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'},
    'EMEA-FR-PAR2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'},

    'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},
    'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},

    'waw1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'},
    'EMEA-PL-WAW1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'},
}

# Base URL of the unified Scaleway API.
SCALEWAY_ENDPOINT = "https://api.scaleway.com"

# Region slugs accepted by region-scoped API products.
SCALEWAY_REGIONS = [
    "fr-par",
    "nl-ams",
    "pl-waw",
]

# Zone slugs accepted by zone-scoped API products.
SCALEWAY_ZONES = [
    "fr-par-1",
    "fr-par-2",
    "nl-ams-1",
    "pl-waw-1",
]
|
@ -0,0 +1,94 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.urls import fetch_url, basic_auth_header
|
||||
|
||||
|
||||
class BitbucketHelper:
    """Shared helper for Bitbucket modules: argument spec, OAuth2 token
    retrieval and authenticated HTTP requests against the Bitbucket API."""

    BITBUCKET_API_URL = 'https://api.bitbucket.org'

    def __init__(self, module):
        self.module = module
        # Set by fetch_access_token(); None until OAuth2 auth has been done.
        self.access_token = None

    @staticmethod
    def bitbucket_argument_spec():
        """Return the argument-spec entries common to Bitbucket modules
        (OAuth2 client credentials and/or username/password)."""
        return dict(
            client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
            client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
            # TODO:
            # - Rename user to username once current usage of username is removed
            # - Alias user to username and deprecate it
            user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
            password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
        )

    @staticmethod
    def bitbucket_required_one_of():
        """required_one_of constraint: at least one credential field must be set."""
        return [['client_id', 'client_secret', 'user', 'password']]

    @staticmethod
    def bitbucket_required_together():
        """required_together constraint: each credential pair must be complete."""
        return [['client_id', 'client_secret'], ['user', 'password']]

    def fetch_access_token(self):
        """Exchange client_id/client_secret for an OAuth2 access token.

        No-op when client credentials are not configured.  On failure the
        module exits via fail_json.
        """
        if self.module.params['client_id'] and self.module.params['client_secret']:
            headers = {
                'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']),
            }

            info, content = self.request(
                api_url='https://bitbucket.org/site/oauth2/access_token',
                method='POST',
                data='grant_type=client_credentials',
                headers=headers,
            )

            if info['status'] == 200:
                self.access_token = content['access_token']
            else:
                self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))

    def request(self, api_url, method, data=None, headers=None):
        """Perform one authenticated HTTP request.

        :param api_url: absolute URL to call.
        :param method: HTTP verb.
        :param data: request body; a dict is JSON-encoded automatically.
        :param headers: extra headers merged with the auth header.
        :returns: tuple (info dict from fetch_url, decoded JSON body or {}).
        """
        headers = headers or {}

        # Prefer the OAuth2 bearer token; fall back to basic auth.
        if self.access_token:
            headers.update({
                'Authorization': 'Bearer {0}'.format(self.access_token),
            })
        elif self.module.params['user'] and self.module.params['password']:
            headers.update({
                'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']),
            })

        if isinstance(data, dict):
            data = self.module.jsonify(data)
            headers.update({
                'Content-type': 'application/json',
            })

        response, info = fetch_url(
            module=self.module,
            url=api_url,
            method=method,
            headers=headers,
            data=data,
            force=True,
        )

        content = {}

        if response is not None:
            body = to_text(response.read())
            if body:
                content = json.loads(body)

        return info, content
|
@ -0,0 +1,21 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# (c) 2018 Luca 'remix_tj' Lorenzetto
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
# Argument-spec entries shared by every EMC VNX module: the storage
# processor address and the credentials used to reach it.
emc_vnx_argument_spec = dict(
    sp_address=dict(type='str', required=True),
    sp_user=dict(type='str', required=False, default='sysadmin'),
    sp_password=dict(type='str', required=False, default='sysadmin', no_log=True),
)
|
@ -0,0 +1,95 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible.module_utils import basic
|
||||
|
||||
|
||||
def convert_to_binary_multiple(size_with_unit):
    """Convert a size string such as '10 GiB' to an integer number of MiB.

    Returns -1 for None input or a negative size; raises ValueError when
    the string does not end in one of MiB/GiB/TiB.
    """
    if size_with_unit is None:
        return -1
    valid_units = ['MiB', 'GiB', 'TiB']
    stripped = size_with_unit.strip()
    matched_units = [unit for unit in valid_units if stripped.endswith(unit)]
    for unit in matched_units:
        if float(size_with_unit.split(unit)[0]) < 0:
            return -1
    if not matched_units:
        raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units))

    # Drop spaces and the 'iB' suffix so '10 GiB' becomes '10G', the form
    # human_to_bytes understands.
    magnitude = size_with_unit.replace(" ", "").split('iB')[0]
    size_bytes = basic.human_to_bytes(magnitude)
    return int(size_bytes / (1024 * 1024))
|
||||
|
||||
|
||||
# Connection parameters common to every HPE 3PAR module; merged into each
# module's own argument spec by cpg_argument_spec() and friends.
storage_system_spec = dict(
    storage_system_ip=dict(required=True, type="str"),
    storage_system_username=dict(required=True, type="str", no_log=True),
    storage_system_password=dict(required=True, type="str", no_log=True),
    secure=dict(type="bool", default=False),
)
|
||||
|
||||
|
||||
def cpg_argument_spec():
    """Return the argument spec for CPG (Common Provisioning Group) modules.

    Combines the CPG-specific options with the shared storage_system_spec
    connection parameters.
    """
    spec = dict(
        state=dict(required=True, choices=['present', 'absent'], type='str'),
        cpg_name=dict(required=True, type="str"),
        domain=dict(type="str"),
        growth_increment=dict(type="str"),
        growth_limit=dict(type="str"),
        growth_warning=dict(type="str"),
        raid_type=dict(required=False, type="str", choices=['R0', 'R1', 'R5', 'R6']),
        set_size=dict(required=False, type="int"),
        high_availability=dict(type="str", choices=['PORT', 'CAGE', 'MAG']),
        disk_type=dict(type="str", choices=['FC', 'NL', 'SSD']),
    )
    spec.update(storage_system_spec)
    return spec
|
@ -0,0 +1,278 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2016, Adfinis SyGroup AG
|
||||
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
"""Univention Corporate Server (UCS) access module.
|
||||
|
||||
Provides the following functions for working with a UCS server.
|
||||
|
||||
- ldap_search(filter, base=None, attr=None)
|
||||
Search the LDAP via Univention's LDAP wrapper (ULDAP)
|
||||
|
||||
- config_registry()
|
||||
Return the UCR registry object
|
||||
|
||||
- base_dn()
|
||||
Return the configured Base DN according to the UCR
|
||||
|
||||
- uldap()
|
||||
Return a handle to the ULDAP LDAP wrapper
|
||||
|
||||
- umc_module_for_add(module, container_dn, superordinate=None)
|
||||
Return a UMC module for creating a new object of the given type
|
||||
|
||||
- umc_module_for_edit(module, object_dn, superordinate=None)
|
||||
Return a UMC module for editing an existing object of the given type
|
||||
|
||||
|
||||
Any other module is not part of the "official" API and may change at any time.
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
|
||||
# Public API of this module; any other name may change at any time
# (see the module docstring above).
__all__ = [
    'ldap_search',
    'config_registry',
    'base_dn',
    'uldap',
    'umc_module_for_add',
    'umc_module_for_edit',
]
|
||||
|
||||
|
||||
# Process-wide cache backing _singleton(): maps a name to the object that
# was lazily constructed for it.
_singletons = {}


def ldap_module():
    """Return the python-ldap module, importing it lazily on first use."""
    import ldap as orig_ldap
    return orig_ldap


def _singleton(name, constructor):
    """Return the object cached under *name*, calling *constructor* only once."""
    try:
        return _singletons[name]
    except KeyError:
        _singletons[name] = constructor()
        return _singletons[name]
|
||||
|
||||
|
||||
def config_registry():
    """Return the shared, loaded Univention ConfigRegistry (cached)."""

    def _build():
        import univention.config_registry
        registry = univention.config_registry.ConfigRegistry()
        registry.load()
        return registry

    return _singleton('config_registry', _build)
|
||||
|
||||
|
||||
def base_dn():
    """Return the LDAP base DN configured in the Univention Config Registry."""
    return config_registry()['ldap/base']
|
||||
|
||||
|
||||
def uldap():
    """Return a configured univention uldap object (cached singleton).

    Binds as the domain admin using /etc/ldap.secret when readable,
    otherwise falls back to the machine account via /etc/machine.secret.
    """

    def construct():
        # Use context managers so the secret files are always closed
        # (they previously leaked open file handles).
        try:
            with open('/etc/ldap.secret', 'r') as secret_file:
                bind_dn = 'cn=admin,{0}'.format(base_dn())
                pwd_line = secret_file.readline()
        except IOError:  # pragma: no cover
            with open('/etc/machine.secret', 'r') as secret_file:
                bind_dn = config_registry()["ldap/hostdn"]
                pwd_line = secret_file.readline()
        # Strip the trailing newline from the password line.
        pwd = re.sub('\n', '', pwd_line)

        import univention.admin.uldap
        return univention.admin.uldap.access(
            host=config_registry()['ldap/master'],
            base=base_dn(),
            binddn=bind_dn,
            bindpw=pwd,
            start_tls=1,
        )

    return _singleton('uldap', construct)
|
||||
|
||||
|
||||
def config():
    """Return the shared univention admin config object (cached)."""

    def _build():
        import univention.admin.config
        return univention.admin.config.config()

    return _singleton('config', _build)
|
||||
|
||||
|
||||
def init_modules():
    """Run univention.admin.modules.update() once per process; returns True."""

    def _build():
        import univention.admin.modules
        univention.admin.modules.update()
        return True

    return _singleton('modules_initialized', _build)
|
||||
|
||||
|
||||
def position_base_dn():
    """Return a cached uldap position object rooted at the base DN."""

    def _build():
        import univention.admin.uldap
        return univention.admin.uldap.position(base_dn())

    return _singleton('position_base_dn', _build)
|
||||
|
||||
|
||||
def ldap_dn_tree_parent(dn, count=1):
    """Return *dn* with its first *count* RDN components removed.

    E.g. ldap_dn_tree_parent('cn=a,cn=b,dc=x') -> 'cn=b,dc=x'.
    """
    components = dn.split(',')
    return ','.join(components[count:])
|
||||
|
||||
|
||||
def ldap_search(filter, base=None, attr=None):
    """Replaces uldaps search and uses a generator.
    !! Arguments are not the same.

    :param filter: LDAP filter string passed through to python-ldap.
    :param base: search base DN; defaults to the UCR-configured base DN.
    :param attr: optional list of attribute names to fetch.
    :returns: generator yielding raw python-ldap search entries.
    """

    if base is None:
        base = base_dn()
    # Asynchronous search: msgid identifies the pending operation.
    msgid = uldap().lo.lo.search(
        base,
        ldap_module().SCOPE_SUBTREE,
        filterstr=filter,
        attrlist=attr
    )
    # I used to have a try: finally: here but there seems to be a bug in python
    # which swallows the KeyboardInterrupt
    # The abandon now doesn't make too much sense
    while True:
        # all=0 fetches one result message at a time so entries can be
        # yielded incrementally instead of buffering the whole result set.
        result_type, result_data = uldap().lo.lo.result(msgid, all=0)
        if not result_data:
            break
        # NOTE(review): comparing result_type with 'is' relies on CPython
        # interning of the small-int RES_* constants; '==' would be safer —
        # confirm before changing behavior.
        if result_type is ldap_module().RES_SEARCH_RESULT:  # pragma: no cover
            break
        else:
            if result_type is ldap_module().RES_SEARCH_ENTRY:
                for res in result_data:
                    yield res
    uldap().lo.lo.abandon(msgid)
|
||||
|
||||
|
||||
def module_by_name(module_name_):
    """Return an initialized UMC module identified by *module_name_*.

    The name is a udm-style module specification such as 'users/user',
    'shares/share' or 'groups/group'.  Raises KeyError when no such module
    exists.  Modules are cached, so repeated calls do not re-initialize.
    """

    def _build():
        import univention.admin.modules
        init_modules()
        umc_module = univention.admin.modules.get(module_name_)
        univention.admin.modules.init(uldap(), position_base_dn(), umc_module)
        return umc_module

    return _singleton('module/%s' % module_name_, _build)
|
||||
|
||||
|
||||
def get_umc_admin_objects():
    """Convenience accessor for univention.admin.objects.

    Imports the univention.* modules lazily, i.e. only when this function
    is actually called.
    """
    import univention.admin
    return univention.admin.objects
|
||||
|
||||
|
||||
def umc_module_for_add(module, container_dn, superordinate=None):
    """Return a UMC module object prepared for creating a new entry.

    *module* is a udm-style module specification such as 'users/user',
    'shares/share' or 'groups/group'.  *container_dn* MUST be the dn of the
    container (not of the object to be created itself!).
    """
    umc_mod = module_by_name(module)

    position = position_base_dn()
    position.setDn(container_dn)

    # config and ldap connection come from the shared singleton accessors.
    new_object = umc_mod.object(config(), uldap(), position, superordinate=superordinate)
    new_object.open()

    return new_object
|
||||
|
||||
|
||||
def umc_module_for_edit(module, object_dn, superordinate=None):
    """Return a UMC module object prepared for editing an existing entry.

    *module* is a udm-style module specification such as 'users/user',
    'shares/share' or 'groups/group'.  *object_dn* MUST be the dn of the
    object itself, not the container!
    """
    umc_mod = module_by_name(module)

    admin_objects = get_umc_admin_objects()

    # Position at the parent container of the object being edited.
    position = position_base_dn()
    position.setDn(ldap_dn_tree_parent(object_dn))

    existing = admin_objects.get(
        umc_mod,
        config(),
        uldap(),
        position=position,
        superordinate=superordinate,
        dn=object_dn
    )
    existing.open()

    return existing
|
||||
|
||||
|
||||
def create_containers_and_parents(container_dn):
    """Create a container and if needed the parents containers"""
    import univention.admin.uexceptions as uexcp
    # Only cn= containers are supported by this helper.
    if not container_dn.startswith("cn="):
        raise AssertionError()
    try:
        parent = ldap_dn_tree_parent(container_dn)
        obj = umc_module_for_add(
            'container/cn',
            parent
        )
        # Name is the value of the leading RDN, e.g. 'foo' from 'cn=foo,...'.
        obj['name'] = container_dn.split(',')[0].split('=')[1]
        obj['description'] = "container created by import"
    except uexcp.ldapError:
        # Parent container is missing: create the ancestors recursively,
        # then prepare this container again.
        create_containers_and_parents(parent)
        obj = umc_module_for_add(
            'container/cn',
            parent
        )
        obj['name'] = container_dn.split(',')[0].split('=')[1]
        obj['description'] = "container created by import"
    # NOTE(review): the prepared object is neither saved (no create()/commit()
    # call) nor returned — confirm whether callers rely on a side effect of
    # umc_module_for_add or whether a create step is missing here.
|
@ -0,0 +1,217 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
class UTMModuleConfigurationError(Exception):
    """Raised when a UTM module is mis-configured, e.g. when it declares
    change-relevant keys that are not part of its argument spec."""

    def __init__(self, msg, **args):
        # Pass only the message to Exception so str(e) yields the message.
        # (The previous code additionally passed `self`, polluting e.args
        # with the exception instance itself.)
        super(UTMModuleConfigurationError, self).__init__(msg)
        self.msg = msg
        # Extra keyword arguments surfaced via fail_json(other=...).
        self.module_fail_args = args

    def do_fail(self, module):
        """Abort the given Ansible module with this error's message and extras."""
        module.fail_json(msg=self.msg, other=self.module_fail_args)
|
||||
|
||||
|
||||
class UTMModule(AnsibleModule):
    """
    This is a helper class to construct any UTM Module. This will automatically add the utm host, port, token,
    protocol, validate_certs and state field to the module. If you want to implement your own sophos utm module
    just initialize this UTMModule class and define the Payload fields that are needed for your module.
    See the other modules like utm_aaa_group for example.
    """

    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
                 mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
                 supports_check_mode=False, required_if=None):
        # Arguments shared by every UTM module; the module's own spec is
        # merged on top and may override individual entries.
        common_spec = dict(
            headers=dict(type='dict', required=False, default={}),
            utm_host=dict(type='str', required=True),
            utm_port=dict(type='int', default=4444),
            utm_token=dict(type='str', required=True, no_log=True),
            utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
            validate_certs=dict(type='bool', required=False, default=True),
            state=dict(default='present', choices=['present', 'absent'])
        )
        merged_spec = self._merge_specs(common_spec, argument_spec)
        super(UTMModule, self).__init__(merged_spec, bypass_checks, no_log, mutually_exclusive,
                                        required_together, required_one_of, add_file_common_args,
                                        supports_check_mode, required_if)

    def _merge_specs(self, default_specs, custom_specs):
        # Custom entries win over the shared defaults.
        merged = dict(default_specs)
        merged.update(custom_specs)
        return merged
|
||||
|
||||
|
||||
class UTM:
    """Shared CRUD helper for the Sophos UTM REST API.

    A concrete module constructs this with its endpoint and the list of
    payload keys that determine whether an existing object must be updated,
    then calls execute().
    """

    def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
        """
        Initialize UTM Class
        :param module: The Ansible module
        :param endpoint: The corresponding endpoint to the module
        :param change_relevant_keys: The keys of the object to check for changes
        :param info_only: When implementing an info module, set this to true. Will allow access to the info method only
        """
        self.info_only = info_only
        self.module = module
        self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
            module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"

        """
        The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
        """
        self.change_relevant_keys = change_relevant_keys
        # The REST API authenticates via HTTP basic auth with the literal
        # user name 'token' and the API token as password.
        self.module.params['url_username'] = 'token'
        self.module.params['url_password'] = module.params.get('utm_token')
        # Fail fast when a module declares change-relevant keys that are not
        # part of its own argument spec.  The original condition was
        # inverted (it tested that every module param was a relevant key,
        # which the utm_* base params make impossible), so this validation
        # never fired.
        if not all(elem in module.params.keys() for elem in self.change_relevant_keys):
            raise UTMModuleConfigurationError(
                "The keys " + to_native(
                    self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
                    list(module.params.keys())))

    def execute(self):
        """Dispatch to add/remove/info based on state; report errors via fail_json."""
        try:
            if not self.info_only:
                if self.module.params.get('state') == 'present':
                    self._add()
                elif self.module.params.get('state') == 'absent':
                    self._remove()
            else:
                self._info()
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def _info(self):
        """
        returns the info for an object in utm
        """
        info, result = self._lookup_entry(self.module, self.request_url)
        if info["status"] >= 400:
            # info is a dict; the error payload lives under 'body'
            # (json.loads(info) on the dict itself raised a TypeError).
            self.module.fail_json(result=json.loads(info["body"]))
        else:
            if result is None:
                self.module.exit_json(changed=False)
            else:
                self.module.exit_json(result=result, changed=False)

    def _add(self):
        """
        adds or updates a host object on utm
        """

        combined_headers = self._combine_headers()

        is_changed = False
        info, result = self._lookup_entry(self.module, self.request_url)
        if info["status"] >= 400:
            # See _info: the payload is under info['body'].
            self.module.fail_json(result=json.loads(info["body"]))
        else:
            data_as_json_string = self.module.jsonify(self.module.params)
            if result is None:
                # No existing object with this name: create it.
                response, info = fetch_url(self.module, self.request_url, method="POST",
                                           headers=combined_headers,
                                           data=data_as_json_string)
                if info["status"] >= 400:
                    self.module.fail_json(msg=json.loads(info["body"]))
                is_changed = True
                result = self._clean_result(json.loads(response.read()))
            else:
                # Existing object: update only when a relevant key differs.
                if self._is_object_changed(self.change_relevant_keys, self.module, result):
                    response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
                                               headers=combined_headers,
                                               data=data_as_json_string)
                    if info['status'] >= 400:
                        self.module.fail_json(msg=json.loads(info["body"]))
                    is_changed = True
                    result = self._clean_result(json.loads(response.read()))
            self.module.exit_json(result=result, changed=is_changed)

    def _combine_headers(self):
        """
        This will combine a header default with headers that come from the module declaration
        :return: A combined headers dict
        """
        default_headers = {"Accept": "application/json", "Content-type": "application/json"}
        if self.module.params.get('headers') is not None:
            result = default_headers.copy()
            result.update(self.module.params.get('headers'))
        else:
            result = default_headers
        return result

    def _remove(self):
        """
        removes an object from utm
        """
        is_changed = False
        info, result = self._lookup_entry(self.module, self.request_url)
        if result is not None:
            response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
                                       headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
                                       data=self.module.jsonify(self.module.params))
            if info["status"] >= 400:
                self.module.fail_json(msg=json.loads(info["body"]))
            else:
                is_changed = True
        self.module.exit_json(changed=is_changed)

    def _lookup_entry(self, module, request_url):
        """
        Lookup for existing entry
        :param module: the Ansible module (provides the 'name' to match on)
        :param request_url: collection URL to query
        :return: tuple (info dict from fetch_url, matching entry dict or None)
        """
        response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
        result = None
        if response is not None:
            results = json.loads(response.read())
            # First entry whose 'name' matches the module's 'name' param.
            result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
        return info, result

    def _clean_result(self, result):
        """
        Will clean the result from irrelevant fields
        :param result: The result from the query
        :return: The modified result
        """
        del result['utm_host']
        del result['utm_port']
        del result['utm_token']
        del result['utm_protocol']
        del result['validate_certs']
        del result['url_username']
        del result['url_password']
        del result['state']
        return result

    def _is_object_changed(self, keys, module, result):
        """
        Check if my object is changed
        :param keys: The keys that will determine if an object is changed
        :param module: The module
        :param result: The result from the query
        :return: True when any relevant key differs between params and result
        """
        for key in keys:
            if module.params.get(key) != result[key]:
                return True
        return False
|
@ -0,0 +1,17 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
"""Provide version object to compare version numbers."""
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
|
||||
# remove the _version.py file, and replace the following import by
|
||||
#
|
||||
# from ansible.module_utils.compat.version import LooseVersion
|
||||
|
||||
from ._version import LooseVersion
|
@ -0,0 +1,97 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
HAS_VEXATAPI = True
|
||||
try:
|
||||
from vexatapi.vexata_api_proxy import VexataAPIProxy
|
||||
except ImportError:
|
||||
HAS_VEXATAPI = False
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
|
||||
VXOS_VERSION = None
|
||||
|
||||
|
||||
def get_version(iocs_json):
    """Return the vXOS version of the active IOC as a tuple of ints.

    :param iocs_json: list of IOC dicts; the active one has a truthy
        'mgmtRole' and a 'swVersion' like 'v3.5.0-<build>'.
    :raises Exception: on empty input, no active IOC, or a malformed
        version string.
    """
    if not iocs_json:
        raise Exception('Invalid IOC json')
    # Use a list, not filter(): on Python 3 a filter object is always truthy
    # and not subscriptable, so both checks below were broken.
    active = [ioc for ioc in iocs_json if ioc['mgmtRole']]
    if not active:
        raise Exception('Unable to detect active IOC')
    ver = active[0]['swVersion']
    if ver[0] != 'v':
        raise Exception('Illegal version string')
    # e.g. 'v3.5.0-1234' -> '3.5.0'
    ver = ver[1:ver.find('-')]
    # tuple(...) of ints, not a lazy map object, so callers can compare.
    return tuple(int(part) for part in ver.split('.'))
|
||||
|
||||
|
||||
def get_array(module):
    """Return storage array object or fail"""
    global VXOS_VERSION

    if not HAS_VEXATAPI:
        module.fail_json(msg='vexatapi library is required for this module. '
                             'To install, use `pip install vexatapi`')

    # Pull connection settings from the module arguments.
    params = module.params
    host = params['array']
    username = params.get('user', None)
    passwd = params.get('password', None)
    verify = params.get('validate_certs')

    if username and passwd:
        proxy = VexataAPIProxy(host, username, passwd, verify_cert=verify)
    else:
        module.fail_json(msg='The user/password are required to be passed in to '
                             'the module as arguments or by setting the '
                             'VEXATA_USER and VEXATA_PASSWORD environment variables.')

    try:
        if proxy.test_connection():
            # Cache the array's software version for feature checks elsewhere.
            VXOS_VERSION = get_version(proxy.iocs())
            return proxy
        else:
            module.fail_json(msg='Test connection to array failed.')
    except Exception as exc:
        module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(exc)))
|
||||
|
||||
|
||||
def argument_spec():
    """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
    return {
        'array': {
            'type': 'str',
            'required': True,
        },
        'user': {
            'type': 'str',
            'fallback': (env_fallback, ['VEXATA_USER']),
        },
        'password': {
            'type': 'str',
            'no_log': True,
            'fallback': (env_fallback, ['VEXATA_PASSWORD']),
        },
        'validate_certs': {
            'type': 'bool',
            'required': False,
            'default': False,
        },
    }
|
||||
|
||||
|
||||
def required_together():
    """Return the default list used for the required_together argument to AnsibleModule"""
    # Credentials only make sense as a pair.
    credential_pair = ['user', 'password']
    return [credential_pair]
|
||||
|
||||
|
||||
def size_to_MiB(size):
    """Convert a '<integer>[MGT]' string to MiB, return -1 on error.

    Args:
        size (str): Size specification such as '100M', '2G' or '1T',
            where M = MiB, G = GiB and T = TiB.

    Returns:
        int: The size expressed in MiB, or -1 if the string is malformed.
    """
    # Guard against empty/too-short input: the original indexed size[-1]
    # unconditionally and raised IndexError on '' instead of returning -1.
    if not size or len(size) < 2:
        return -1
    quant = size[:-1]
    exponent = size[-1]
    if not quant.isdigit() or exponent not in 'MGT':
        return -1
    quant = int(quant)
    # Scale to MiB: GiB is 2**10 MiB, TiB is 2**20 MiB.
    if exponent == 'G':
        quant <<= 10
    elif exponent == 'T':
        quant <<= 20
    return quant
|
@ -0,0 +1,861 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import atexit
|
||||
import time
|
||||
import re
|
||||
import traceback
|
||||
|
||||
XENAPI_IMP_ERR = None
|
||||
try:
|
||||
import XenAPI
|
||||
HAS_XENAPI = True
|
||||
except ImportError:
|
||||
HAS_XENAPI = False
|
||||
XENAPI_IMP_ERR = traceback.format_exc()
|
||||
|
||||
from ansible.module_utils.basic import env_fallback, missing_required_lib
|
||||
from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
|
||||
|
||||
|
||||
def xenserver_common_argument_spec():
    """Return the base argument_spec dictionary shared by XenServer modules."""
    return {
        'hostname': {
            'type': 'str',
            'aliases': ['host', 'pool'],
            'required': False,
            'default': 'localhost',
            'fallback': (env_fallback, ['XENSERVER_HOST']),
        },
        'username': {
            'type': 'str',
            'aliases': ['user', 'admin'],
            'required': False,
            'default': 'root',
            'fallback': (env_fallback, ['XENSERVER_USER']),
        },
        'password': {
            'type': 'str',
            'aliases': ['pass', 'pwd'],
            'required': False,
            'no_log': True,
            'fallback': (env_fallback, ['XENSERVER_PASSWORD']),
        },
        'validate_certs': {
            'type': 'bool',
            'required': False,
            'default': True,
            'fallback': (env_fallback, ['XENSERVER_VALIDATE_CERTS']),
        },
    }
|
||||
|
||||
|
||||
def xapi_to_module_vm_power_state(power_state):
    """Maps XAPI VM power states to module VM power states.

    Returns None for states with no module-level equivalent.
    """
    return {
        "running": "poweredon",
        "halted": "poweredoff",
        "suspended": "suspended",
        "paused": "paused",
    }.get(power_state)
|
||||
|
||||
|
||||
def module_to_xapi_vm_power_state(power_state):
    """Maps module VM power states to XAPI VM power states.

    Transitional states ('restarted', 'shutdownguest', 'rebootguest') map
    to the XAPI state the VM ends up in. Returns None for unknown states.
    """
    return {
        "poweredon": "running",
        "poweredoff": "halted",
        "restarted": "running",
        "suspended": "suspended",
        "shutdownguest": "halted",
        "rebootguest": "running",
    }.get(power_state)
|
||||
|
||||
|
||||
def is_valid_ip_addr(ip_addr):
    """Validates given string as IPv4 address for given string.

    Args:
        ip_addr (str): string to validate as IPv4 address.

    Returns:
        bool: True if string is valid IPv4 address, else False.
    """
    octets = ip_addr.split('.')

    if len(octets) != 4:
        return False

    # Each octet must be a non-negative decimal in the 0-255 range.
    # (Leading zeros are accepted, matching the historic behavior.)
    return all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets)
|
||||
|
||||
|
||||
def is_valid_ip_netmask(ip_netmask):
    """Validates given string as IPv4 netmask.

    Args:
        ip_netmask (str): string to validate as IPv4 netmask.

    Returns:
        bool: True if string is valid IPv4 netmask, else False.
    """
    octets = ip_netmask.split('.')

    if len(octets) != 4:
        return False

    # Only these byte values can appear in a contiguous netmask.
    permitted = ('0', '128', '192', '224', '240', '248', '252', '254', '255')

    for octet in octets:
        if octet not in permitted:
            return False

    # Mask bits must be contiguous: once an octet is not 255, every
    # following octet has to be 0.
    if octets[0] != '255':
        return octets[1] == '0' and octets[2] == '0' and octets[3] == '0'
    if octets[1] != '255':
        return octets[2] == '0' and octets[3] == '0'
    if octets[2] != '255':
        return octets[3] == '0'

    return True
|
||||
|
||||
|
||||
def is_valid_ip_prefix(ip_prefix):
    """Validates given string as IPv4 prefix.

    Args:
        ip_prefix (str): string to validate as IPv4 prefix.

    Returns:
        bool: True if string is valid IPv4 prefix, else False.
    """
    # Must be all decimal digits (rejects '', '-1', '1.5') and within /0../32.
    return ip_prefix.isdigit() and 0 <= int(ip_prefix) <= 32
|
||||
|
||||
|
||||
def ip_prefix_to_netmask(ip_prefix, skip_check=False):
    """Converts IPv4 prefix to netmask.

    Args:
        ip_prefix (str): IPv4 prefix to convert.
        skip_check (bool): Skip validation of IPv4 prefix
            (default: False). Use if you are sure IPv4 prefix is valid.

    Returns:
        str: IPv4 netmask equivalent to given IPv4 prefix if
        IPv4 prefix is valid, else an empty string.
    """
    if not (skip_check or is_valid_ip_prefix(ip_prefix)):
        return ""

    # Build the 32-bit mask, then slice it into four bytes, MSB first.
    mask_bits = 0xffffffff << (32 - int(ip_prefix))
    return '.'.join(str((mask_bits >> shift) & 0xff) for shift in (24, 16, 8, 0))
|
||||
|
||||
|
||||
def ip_netmask_to_prefix(ip_netmask, skip_check=False):
    """Converts IPv4 netmask to prefix.

    Args:
        ip_netmask (str): IPv4 netmask to convert.
        skip_check (bool): Skip validation of IPv4 netmask
            (default: False). Use if you are sure IPv4 netmask is valid.

    Returns:
        str: IPv4 prefix equivalent to given IPv4 netmask if
        IPv4 netmask is valid, else an empty string.
    """
    if not (skip_check or is_valid_ip_netmask(ip_netmask)):
        return ""

    # The prefix length is the total number of set bits across all octets.
    set_bits = sum(bin(int(octet)).count("1") for octet in ip_netmask.split("."))
    return str(set_bits)
|
||||
|
||||
|
||||
def is_valid_ip6_addr(ip6_addr):
    """Validates given string as IPv6 address.

    Args:
        ip6_addr (str): string to validate as IPv6 address.

    Returns:
        bool: True if string is valid IPv6 address, else False.
    """
    hextets = ip6_addr.lower().split(':')

    # A '::' at the very start or end produces an extra empty element;
    # drop it so only an interior '::' leaves an empty marker behind.
    if hextets[0] == "":
        hextets.pop(0)

    if hextets[-1] == "":
        hextets.pop(-1)

    if len(hextets) > 8:
        return False

    # At most one '::' (zero-run abbreviation) is allowed; without one,
    # exactly eight groups are required.
    empty_count = hextets.count("")
    if empty_count > 1:
        return False
    elif empty_count == 1:
        hextets.remove("")
    else:
        if len(hextets) != 8:
            return False

    hextet_regex = re.compile('^[0-9a-f]{1,4}$')

    # Every remaining group must be 1-4 hex digits.
    return all(bool(hextet_regex.match(hextet)) for hextet in hextets)
|
||||
|
||||
|
||||
def is_valid_ip6_prefix(ip6_prefix):
    """Validates given string as IPv6 prefix.

    Args:
        ip6_prefix (str): string to validate as IPv6 prefix.

    Returns:
        bool: True if string is valid IPv6 prefix, else False.
    """
    # Must be all decimal digits and within the /0../128 range.
    return ip6_prefix.isdigit() and 0 <= int(ip6_prefix) <= 128
|
||||
|
||||
|
||||
def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
    """Finds and returns a reference to arbitrary XAPI object.

    An object is searched by using either name (name_label) or UUID
    with UUID taken precedence over name.

    Args:
        module: Reference to Ansible module object.
        name (str): Name (name_label) of an object to search for.
        uuid (str): UUID of an object to search for.
        obj_type (str): Any valid XAPI object type. See XAPI docs.
        fail (bool): Should function fail with error message if object
            is not found or exit silently (default: True). The function
            always fails if multiple objects with same name are found.
        msg_prefix (str): A string error messages should be prefixed
            with (default: "").

    Returns:
        XAPI reference to found object or None if object is not found
        and fail=False.
    """
    # NOTE(review): error paths below rely on module.fail_json() not
    # returning (standard AnsibleModule behavior) — confirm for any
    # non-standard module object.
    xapi_session = XAPI.connect(module)

    # Map the friendly obj_type aliases onto the real XAPI class names
    # used to build the xenapi_request method names below.
    if obj_type in ["template", "snapshot"]:
        real_obj_type = "VM"
    elif obj_type == "home server":
        real_obj_type = "host"
    elif obj_type == "ISO image":
        real_obj_type = "VDI"
    else:
        real_obj_type = obj_type

    obj_ref = None

    # UUID has precedence over name.
    if uuid:
        try:
            # Find object by UUID. If no object is found using given UUID,
            # an exception will be generated.
            obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
        except XenAPI.Failure as f:
            # With fail=False a lookup miss is silent and None is returned.
            if fail:
                module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
    elif name:
        try:
            # Find object by name (name_label).
            obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
        except XenAPI.Failure as f:
            module.fail_json(msg="XAPI ERROR: %s" % f.details)

        # If obj_ref_list is empty.
        if not obj_ref_list:
            if fail:
                module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
        # If obj_ref_list contains multiple object references.
        elif len(obj_ref_list) > 1:
            module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
        # The obj_ref_list contains only one object reference.
        else:
            obj_ref = obj_ref_list[0]
    else:
        # Neither uuid nor name was given; this is a caller error.
        module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))

    return obj_ref
|
||||
|
||||
|
||||
def gather_vm_params(module, vm_ref):
    """Gathers all VM parameters available in XAPI database.

    Opaque references inside the VM record (affinity host, VBDs, VDIs,
    VIFs, networks, guest metrics) are replaced in-place by their full
    records so callers can read them directly.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.

    Returns:
        dict: VM parameters.
    """
    # We silently return empty vm_params if bad vm_ref was supplied.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        return {}

    xapi_session = XAPI.connect(module)

    try:
        vm_params = xapi_session.xenapi.VM.get_record(vm_ref)

        # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.

        # Affinity.
        if vm_params['affinity'] != "OpaqueRef:NULL":
            vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
            vm_params['affinity'] = vm_affinity
        else:
            vm_params['affinity'] = {}

        # VBDs.
        vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]

        # List of VBDs is usually sorted by userdevice but we sort just
        # in case. We need this list sorted by userdevice so that we can
        # make positional pairing with module.params['disks'].
        vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
        vm_params['VBDs'] = vm_vbd_params_list

        # VDIs. Each VBD's 'VDI' ref is replaced with the full VDI record
        # (or {} for an empty drive).
        for vm_vbd_params in vm_params['VBDs']:
            if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
                vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
            else:
                vm_vdi_params = {}

            vm_vbd_params['VDI'] = vm_vdi_params

        # VIFs.
        vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]

        # List of VIFs is usually sorted by device but we sort just
        # in case. We need this list sorted by device so that we can
        # make positional pairing with module.params['networks'].
        vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
        vm_params['VIFs'] = vm_vif_params_list

        # Networks. Each VIF's 'network' ref is replaced with the full
        # network record (or {}).
        for vm_vif_params in vm_params['VIFs']:
            if vm_vif_params['network'] != "OpaqueRef:NULL":
                vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
            else:
                vm_network_params = {}

            vm_vif_params['network'] = vm_network_params

        # Guest metrics.
        if vm_params['guest_metrics'] != "OpaqueRef:NULL":
            vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
            vm_params['guest_metrics'] = vm_guest_metrics
        else:
            vm_params['guest_metrics'] = {}

        # Detect customization agent: XenServer 7.0+ guest tools that
        # advertise "feature-static-ip-setting" support native network
        # configuration; everything else falls back to the custom agent.
        xenserver_version = get_xenserver_version(module)

        if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
                "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
            vm_params['customization_agent'] = "native"
        else:
            vm_params['customization_agent'] = "custom"

    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return vm_params
|
||||
|
||||
|
||||
def gather_vm_facts(module, vm_params):
    """Gathers VM facts.

    Args:
        module: Reference to Ansible module object.
        vm_params (dict): A dictionary with VM parameters as returned
            by gather_vm_params() function.

    Returns:
        dict: VM facts.
    """
    # We silently return empty vm_facts if no vm_params are available.
    if not vm_params:
        return {}

    xapi_session = XAPI.connect(module)

    # Gather facts.
    vm_facts = {
        "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
        "name": vm_params['name_label'],
        "name_desc": vm_params['name_description'],
        "uuid": vm_params['uuid'],
        "is_template": vm_params['is_a_template'],
        "folder": vm_params['other_config'].get('folder', ''),
        "hardware": {
            "num_cpus": int(vm_params['VCPUs_max']),
            "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
            # memory_dynamic_max is in bytes; convert to MiB.
            "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
        },
        "disks": [],
        "cdrom": {},
        "networks": [],
        "home_server": vm_params['affinity'].get('name_label', ''),
        "domid": vm_params['domid'],
        "platform": vm_params['platform'],
        "other_config": vm_params['other_config'],
        "xenstore_data": vm_params['xenstore_data'],
        "customization_agent": vm_params['customization_agent'],
    }

    # Walk the (dereferenced) VBD records: "Disk" entries become disk
    # facts; the "CD" entry fills vm_facts['cdrom'].
    for vm_vbd_params in vm_params['VBDs']:
        if vm_vbd_params['type'] == "Disk":
            vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])

            vm_disk_params = {
                "size": int(vm_vbd_params['VDI']['virtual_size']),
                "name": vm_vbd_params['VDI']['name_label'],
                "name_desc": vm_vbd_params['VDI']['name_description'],
                "sr": vm_disk_sr_params['name_label'],
                "sr_uuid": vm_disk_sr_params['uuid'],
                "os_device": vm_vbd_params['device'],
                "vbd_userdevice": vm_vbd_params['userdevice'],
            }

            vm_facts['disks'].append(vm_disk_params)
        elif vm_vbd_params['type'] == "CD":
            if vm_vbd_params['empty']:
                vm_facts['cdrom'].update(type="none")
            else:
                vm_facts['cdrom'].update(type="iso")
                vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])

    # Walk the (dereferenced) VIF records and build per-NIC network facts.
    for vm_vif_params in vm_params['VIFs']:
        # Guest metrics key "<device>/ip" carries the IP reported by the
        # guest agent for that NIC.
        vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})

        vm_network_params = {
            "name": vm_vif_params['network']['name_label'],
            "mac": vm_vif_params['MAC'],
            "vif_device": vm_vif_params['device'],
            "mtu": vm_vif_params['MTU'],
            "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
            "prefix": "",
            "netmask": "",
            "gateway": "",
            "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
                                                                                                                            vm_vif_params['device'])],
            "prefix6": "",
            "gateway6": "",
        }

        # Static network configuration is stored differently depending on
        # which customization agent the VM uses.
        if vm_params['customization_agent'] == "native":
            # ipv4_addresses entries look like "addr/prefix".
            if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
                vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
                vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])

            vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']

            if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
                vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]

            vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']

        elif vm_params['customization_agent'] == "custom":
            # Custom agent keeps its settings under xenstore
            # "vm-data/networks/<device>/<field>" keys.
            vm_xenstore_data = vm_params['xenstore_data']

            for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
                vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")

        vm_facts['networks'].append(vm_network_params)

    return vm_facts
|
||||
|
||||
|
||||
def set_vm_power_state(module, vm_ref, power_state, timeout=300):
    """Controls VM power state.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.
        power_state (str): Power state to put VM into. Accepted values:

            - poweredon
            - poweredoff
            - restarted
            - suspended
            - shutdownguest
            - rebootguest

        timeout (int): timeout in seconds (default: 300). A value of 0
            makes guest shutdown/reboot wait indefinitely.

    Returns:
        tuple (bool, str): Bool element is True if VM power state has
        changed by calling this function, else False. Str element carries
        a value of resulting power state as defined by XAPI - 'running',
        'halted' or 'suspended'.
    """
    # Fail if we don't have a valid VM reference.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")

    xapi_session = XAPI.connect(module)

    # Normalize the requested state, e.g. "powered-on"/"Powered_On" -> "poweredon".
    power_state = power_state.replace('_', '').replace('-', '').lower()
    vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)

    state_changed = False

    try:
        # Get current state of the VM.
        vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())

        if vm_power_state_current != power_state:
            if power_state == "poweredon":
                if not module.check_mode:
                    # VM can be in either halted, suspended, paused or running state.
                    # For VM to be in running state, start has to be called on halted,
                    # resume on suspended and unpause on paused VM.
                    if vm_power_state_current == "poweredoff":
                        xapi_session.xenapi.VM.start(vm_ref, False, False)
                    elif vm_power_state_current == "suspended":
                        xapi_session.xenapi.VM.resume(vm_ref, False, False)
                    elif vm_power_state_current == "paused":
                        xapi_session.xenapi.VM.unpause(vm_ref)
            elif power_state == "poweredoff":
                if not module.check_mode:
                    # hard_shutdown will halt VM regardless of current state.
                    xapi_session.xenapi.VM.hard_shutdown(vm_ref)
            elif power_state == "restarted":
                # hard_reboot will restart VM only if VM is in paused or running state.
                if vm_power_state_current in ["paused", "poweredon"]:
                    if not module.check_mode:
                        xapi_session.xenapi.VM.hard_reboot(vm_ref)
                else:
                    module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
            elif power_state == "suspended":
                # running state is required for suspend.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        xapi_session.xenapi.VM.suspend(vm_ref)
                else:
                    module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
            elif power_state == "shutdownguest":
                # running state is required for guest shutdown.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        if timeout == 0:
                            # timeout == 0: block on the synchronous call.
                            xapi_session.xenapi.VM.clean_shutdown(vm_ref)
                        else:
                            # Use the async variant so we can enforce timeout.
                            task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
                            task_result = wait_for_task(module, task_ref, timeout)

                            if task_result:
                                module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
                else:
                    module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
            elif power_state == "rebootguest":
                # running state is required for guest reboot.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        if timeout == 0:
                            xapi_session.xenapi.VM.clean_reboot(vm_ref)
                        else:
                            task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
                            task_result = wait_for_task(module, task_ref, timeout)

                            if task_result:
                                module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
                else:
                    module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
            else:
                module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)

            # A transition was requested (and, outside check mode, performed).
            state_changed = True
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return (state_changed, vm_power_state_resulting)
|
||||
|
||||
|
||||
def wait_for_task(module, task_ref, timeout=300):
    """Waits for async XAPI task to finish.

    Polls the task status every few seconds until it leaves the
    "pending" state or the timeout expires. The task is destroyed
    afterwards in every case.

    Args:
        module: Reference to Ansible module object.
        task_ref (str): XAPI reference to task.
        timeout (int): timeout in seconds (default: 300). A value of 0
            waits indefinitely.

    Returns:
        str: failure message on failure, else an empty string.
    """
    # Fail if we don't have a valid task reference.
    if not task_ref or task_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")

    xapi_session = XAPI.connect(module)

    # Polling interval in seconds.
    interval = 2

    result = ""

    # If we have to wait indefinitely, make time_left larger than 0 so we can
    # enter while loop.
    if timeout == 0:
        time_left = 1
    else:
        time_left = timeout

    try:
        while time_left > 0:
            task_status = xapi_session.xenapi.task.get_status(task_ref).lower()

            if task_status == "pending":
                # Task is still running.
                time.sleep(interval)

                # We decrease time_left only if we don't wait indefinitely.
                if timeout != 0:
                    time_left -= interval

                continue
            elif task_status == "success":
                # Task is done.
                break
            else:
                # Task failed.
                result = task_status
                break
        else:
            # while/else: the loop ran out of time without a break.
            # We timed out.
            result = "timeout"

        xapi_session.xenapi.task.destroy(task_ref)
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return result
|
||||
|
||||
|
||||
def wait_for_vm_ip_address(module, vm_ref, timeout=300):
    """Waits for VM to acquire an IP address.

    Polls the VM guest metrics until the primary NIC ("0/ip") reports
    an IPv4 address or the timeout expires.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.
        timeout (int): timeout in seconds (default: 300). A value of 0
            waits indefinitely.

    Returns:
        dict: VM guest metrics as retrieved by
        VM_guest_metrics.get_record() XAPI method with info
        on IP address acquired.
    """
    # Fail if we don't have a valid VM reference.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")

    xapi_session = XAPI.connect(module)

    vm_guest_metrics = {}

    try:
        # We translate VM power state string so that error message can be
        # consistent with module VM power states.
        vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())

        # Only a running VM can ever report an IP address.
        if vm_power_state != 'poweredon':
            module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)

        # Polling interval in seconds.
        interval = 2

        # If we have to wait indefinitely, make time_left larger than 0 so we can
        # enter while loop.
        if timeout == 0:
            time_left = 1
        else:
            time_left = timeout

        while time_left > 0:
            vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)

            # Guest metrics only exist once the guest agent has reported in.
            if vm_guest_metrics_ref != "OpaqueRef:NULL":
                vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
                vm_ips = vm_guest_metrics['networks']

                # "0/ip" is the IPv4 address of the first NIC.
                if "0/ip" in vm_ips:
                    break

            time.sleep(interval)

            # We decrease time_left only if we don't wait indefinitely.
            if timeout != 0:
                time_left -= interval
        else:
            # while/else: the loop ran out of time without a break.
            # We timed out.
            module.fail_json(msg="Timed out waiting for VM IP address!")

    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return vm_guest_metrics
|
||||
|
||||
|
||||
def get_xenserver_version(module):
    """Returns XenServer version.

    Args:
        module: Reference to Ansible module object.

    Returns:
        list: Element [0] is major version. Element [1] is minor version.
        Element [2] is update number. Falls back to [0, 0, 0] when the
        reported product version cannot be parsed.
    """
    xapi_session = XAPI.connect(module)

    host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)

    try:
        product_version = xapi_session.xenapi.host.get_software_version(host_ref)['product_version']
        xenserver_version = [int(component) for component in product_version.split('.')]
    except ValueError:
        xenserver_version = [0, 0, 0]

    return xenserver_version
|
||||
|
||||
|
||||
class XAPI(object):
    """Class for XAPI session management."""
    # Cached session shared by all users of this class within one module run.
    _xapi_session = None

    @classmethod
    def connect(cls, module, disconnect_atexit=True):
        """Establishes XAPI connection and returns session reference.

        If no existing session is available, establishes a new one
        and returns it, else returns existing one.

        Args:
            module: Reference to Ansible module object.
            disconnect_atexit (bool): Controls if method should
                register atexit handler to disconnect from XenServer
                on module exit (default: True).

        Returns:
            XAPI session reference.
        """
        # Reuse the cached session if one was already established.
        if cls._xapi_session is not None:
            return cls._xapi_session

        hostname = module.params['hostname']
        username = module.params['username']
        password = module.params['password']
        ignore_ssl = not module.params['validate_certs']

        if hostname == 'localhost':
            # Local (unix socket) connection; credentials are not used.
            cls._xapi_session = XenAPI.xapi_local()
            username = ''
            password = ''
        else:
            # If scheme is not specified we default to http:// because https://
            # is problematic in most setups.
            if not hostname.startswith("http://") and not hostname.startswith("https://"):
                hostname = "http://%s" % hostname

            try:
                # ignore_ssl is supported in XenAPI library from XenServer 7.2
                # SDK onward but there is no way to tell which version we
                # are using. TypeError will be raised if ignore_ssl is not
                # supported. Additionally, ignore_ssl requires Python 2.7.9
                # or newer.
                cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
            except TypeError:
                # Try without ignore_ssl.
                cls._xapi_session = XenAPI.Session(hostname)

            # login_with_password() requires a string, not None.
            if not password:
                password = ''

        try:
            cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
        except XenAPI.Failure as f:
            module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details))

        # Disabling atexit should be used in special cases only.
        if disconnect_atexit:
            atexit.register(cls._xapi_session.logout)

        return cls._xapi_session
|
||||
|
||||
|
||||
class XenServerObject(object):
|
||||
"""Base class for all XenServer objects.
|
||||
|
||||
This class contains active XAPI session reference and common
|
||||
attributes with useful info about XenServer host/pool.
|
||||
|
||||
Attributes:
|
||||
module: Reference to Ansible module object.
|
||||
xapi_session: Reference to XAPI session.
|
||||
pool_ref (str): XAPI reference to a pool currently connected to.
|
||||
default_sr_ref (str): XAPI reference to a pool default
|
||||
Storage Repository.
|
||||
host_ref (str): XAPI rerefence to a host currently connected to.
|
||||
xenserver_version (list of str): Contains XenServer major and
|
||||
minor version.
|
||||
"""
|
||||
|
||||
    def __init__(self, module):
        """Inits XenServerObject using common module parameters.

        Args:
            module: Reference to Ansible module object.
        """
        # The XenAPI library is mandatory; abort with the standard Ansible
        # missing-library message plus the captured import traceback.
        if not HAS_XENAPI:
            module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)

        self.module = module
        # Shared (cached) XAPI session for this module run.
        self.xapi_session = XAPI.connect(module)

        try:
            # A connection always belongs to exactly one pool; take its
            # first (only) pool record for pool-wide lookups.
            self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
            self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
            self.xenserver_version = get_xenserver_version(module)
        except XenAPI.Failure as f:
            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
|
Reference in New Issue
Block a user