re-add collections; aap not happy

2024-02-07 17:21:22 -05:00
parent 4ac2aab28d
commit 80f03628d2
3297 changed files with 461576 additions and 6 deletions


@@ -0,0 +1,465 @@
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
name: cgroup_perf_recap
type: aggregate
requirements:
- whitelist in configuration
- cgroups
short_description: Profiles system activity of tasks and full execution using cgroups
description:
- This is an ansible callback plugin that utilizes cgroups to profile system activity of ansible and
individual tasks, and displays a recap at the end of the playbook execution
notes:
- Requires ansible to be run from within a cgroup, such as with
C(cgexec -g cpuacct,memory,pids:ansible_profile ansible-playbook ...)
- This cgroup should only be used by ansible to get accurate results
- To create the cgroup, first use a command such as
C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g cpuacct,memory,pids:ansible_profile)
options:
control_group:
required: True
description: Name of cgroups control group
env:
- name: CGROUP_CONTROL_GROUP
ini:
- section: callback_cgroup_perf_recap
key: control_group
cpu_poll_interval:
description: Interval between CPU polling for determining CPU usage. A lower value may produce inaccurate
results, while a higher value may not be short enough to collect results for short tasks.
default: 0.25
type: float
env:
- name: CGROUP_CPU_POLL_INTERVAL
ini:
- section: callback_cgroup_perf_recap
key: cpu_poll_interval
memory_poll_interval:
description: Interval between memory polling for determining memory usage. A lower value may produce inaccurate
results, while a higher value may not be short enough to collect results for short tasks.
default: 0.25
type: float
env:
- name: CGROUP_MEMORY_POLL_INTERVAL
ini:
- section: callback_cgroup_perf_recap
key: memory_poll_interval
pid_poll_interval:
description: Interval between PID polling for determining PID count. A lower value may produce inaccurate
results, while a higher value may not be short enough to collect results for short tasks.
default: 0.25
type: float
env:
- name: CGROUP_PID_POLL_INTERVAL
ini:
- section: callback_cgroup_perf_recap
key: pid_poll_interval
display_recap:
description: Controls whether the recap is printed at the end, useful if you will automatically
process the output files
env:
- name: CGROUP_DISPLAY_RECAP
ini:
- section: callback_cgroup_perf_recap
key: display_recap
type: bool
default: true
file_name_format:
description: Format of filename. Accepts C(%(counter)s), C(%(task_uuid)s),
C(%(feature)s), C(%(ext)s). Defaults to C(%(feature)s.%(ext)s) when C(file_per_task) is C(False)
and C(%(counter)s-%(task_uuid)s-%(feature)s.%(ext)s) when C(True)
env:
- name: CGROUP_FILE_NAME_FORMAT
ini:
- section: callback_cgroup_perf_recap
key: file_name_format
type: str
default: '%(feature)s.%(ext)s'
output_dir:
description: Output directory for files containing recorded performance readings. If the value contains a
single %s, the start time of the playbook run will be inserted in that space. Only the deepest
level directory will be created if it does not exist; parent directories will not be created.
type: path
default: /tmp/ansible-perf-%s
env:
- name: CGROUP_OUTPUT_DIR
ini:
- section: callback_cgroup_perf_recap
key: output_dir
output_format:
description: Output format, either CSV or JSON-seq
env:
- name: CGROUP_OUTPUT_FORMAT
ini:
- section: callback_cgroup_perf_recap
key: output_format
type: str
default: csv
choices:
- csv
- json
file_per_task:
description: When set as C(True) along with C(write_files), this callback will write 1 file per task
instead of 1 file for the entire playbook run
env:
- name: CGROUP_FILE_PER_TASK
ini:
- section: callback_cgroup_perf_recap
key: file_per_task
type: bool
default: False
write_files:
description: Dictates whether files will be written containing performance readings
env:
- name: CGROUP_WRITE_FILES
ini:
- section: callback_cgroup_perf_recap
key: write_files
type: bool
default: false
'''
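# A minimal enablement sketch based on the notes and options above. The control group
# name "ansible_profile", the ec2-user account and site.yml are placeholders taken from
# the documentation, not requirements of the plugin.
EXAMPLES = '''
example: |
  # Create a cgroup dedicated to ansible (adjust the owner to your own user):
  #   sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g cpuacct,memory,pids:ansible_profile
  #
  # ansible.cfg:
  #   [defaults]
  #   callbacks_enabled = ansible.posix.cgroup_perf_recap
  #
  #   [callback_cgroup_perf_recap]
  #   control_group = ansible_profile
  #
  # Run the playbook from within that cgroup:
  #   cgexec -g cpuacct,memory,pids:ansible_profile ansible-playbook site.yml
'''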
import csv
import datetime
import os
import time
import threading
from abc import ABCMeta, abstractmethod
from functools import partial
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import with_metaclass
from ansible.parsing.ajson import AnsibleJSONEncoder, json
from ansible.plugins.callback import CallbackBase
RS = '\x1e' # RECORD SEPARATOR
LF = '\x0a' # LINE FEED
def dict_fromkeys(keys, default=None):
d = {}
for key in keys:
d[key] = default() if callable(default) else default
return d
class BaseProf(with_metaclass(ABCMeta, threading.Thread)):
def __init__(self, path, obj=None, writer=None):
threading.Thread.__init__(self) # pylint: disable=non-parent-init-called
self.obj = obj
self.path = path
self.max = 0
self.running = True
self.writer = writer
def run(self):
while self.running:
self.poll()
@abstractmethod
def poll(self):
pass
class MemoryProf(BaseProf):
"""Python thread for recording memory usage"""
def __init__(self, path, poll_interval=0.25, obj=None, writer=None):
super(MemoryProf, self).__init__(path, obj=obj, writer=writer)
self._poll_interval = poll_interval
def poll(self):
with open(self.path) as f:
val = int(f.read().strip()) / 1024**2
if val > self.max:
self.max = val
if self.writer:
try:
self.writer(time.time(), self.obj.get_name(), self.obj._uuid, val)
except ValueError:
# We may be profiling after the playbook has ended
self.running = False
time.sleep(self._poll_interval)
class CpuProf(BaseProf):
def __init__(self, path, poll_interval=0.25, obj=None, writer=None):
super(CpuProf, self).__init__(path, obj=obj, writer=writer)
self._poll_interval = poll_interval
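# cpuacct.usage reports the cgroup's cumulative CPU time in nanoseconds. poll() samples
# it twice, poll_interval apart, and divides the usage delta by the wall-clock delta
# (both converted to microseconds) to get an approximate CPU utilisation percentage.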
def poll(self):
with open(self.path) as f:
start_time = time.time() * 1000**2
start_usage = int(f.read().strip()) / 1000
time.sleep(self._poll_interval)
with open(self.path) as f:
end_time = time.time() * 1000**2
end_usage = int(f.read().strip()) / 1000
val = (end_usage - start_usage) / (end_time - start_time) * 100
if val > self.max:
self.max = val
if self.writer:
try:
self.writer(time.time(), self.obj.get_name(), self.obj._uuid, val)
except ValueError:
# We may be profiling after the playbook has ended
self.running = False
class PidsProf(BaseProf):
def __init__(self, path, poll_interval=0.25, obj=None, writer=None):
super(PidsProf, self).__init__(path, obj=obj, writer=writer)
self._poll_interval = poll_interval
def poll(self):
with open(self.path) as f:
val = int(f.read().strip())
if val > self.max:
self.max = val
if self.writer:
try:
self.writer(time.time(), self.obj.get_name(), self.obj._uuid, val)
except ValueError:
# We may be profiling after the playbook has ended
self.running = False
time.sleep(self._poll_interval)
def csv_writer(writer, timestamp, task_name, task_uuid, value):
writer.writerow([timestamp, task_name, task_uuid, value])
def json_writer(writer, timestamp, task_name, task_uuid, value):
data = {
'timestamp': timestamp,
'task_name': task_name,
'task_uuid': task_uuid,
'value': value,
}
writer.write('%s%s%s' % (RS, json.dumps(data, cls=AnsibleJSONEncoder), LF))
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'ansible.posix.cgroup_perf_recap'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
self._features = ('memory', 'cpu', 'pids')
self._units = {
'memory': 'MB',
'cpu': '%',
'pids': '',
}
self.task_results = dict_fromkeys(self._features, default=list)
self._profilers = dict.fromkeys(self._features)
self._files = dict.fromkeys(self._features)
self._writers = dict.fromkeys(self._features)
self._file_per_task = False
self._counter = 0
self.write_files = False
def _open_files(self, task_uuid=None):
output_format = self._output_format
output_dir = self._output_dir
for feature in self._features:
data = {
b'counter': to_bytes(self._counter),
b'task_uuid': to_bytes(task_uuid),
b'feature': to_bytes(feature),
b'ext': to_bytes(output_format)
}
if self._files.get(feature):
try:
self._files[feature].close()
except Exception:
pass
if self.write_files:
filename = self._file_name_format % data
self._files[feature] = open(os.path.join(output_dir, filename), 'w+')
if output_format == b'csv':
self._writers[feature] = partial(csv_writer, csv.writer(self._files[feature]))
elif output_format == b'json':
self._writers[feature] = partial(json_writer, self._files[feature])
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
cpu_poll_interval = self.get_option('cpu_poll_interval')
memory_poll_interval = self.get_option('memory_poll_interval')
pid_poll_interval = self.get_option('pid_poll_interval')
self._display_recap = self.get_option('display_recap')
control_group = to_bytes(self.get_option('control_group'), errors='surrogate_or_strict')
self.mem_max_file = b'/sys/fs/cgroup/memory/%s/memory.max_usage_in_bytes' % control_group
mem_current_file = b'/sys/fs/cgroup/memory/%s/memory.usage_in_bytes' % control_group
cpu_usage_file = b'/sys/fs/cgroup/cpuacct/%s/cpuacct.usage' % control_group
pid_current_file = b'/sys/fs/cgroup/pids/%s/pids.current' % control_group
for path in (self.mem_max_file, mem_current_file, cpu_usage_file, pid_current_file):
try:
with open(path) as f:
pass
except Exception as e:
self._display.warning(
u'Cannot open %s for reading (%s). Disabling %s' % (to_text(path), to_text(e), self.CALLBACK_NAME)
)
self.disabled = True
return
try:
with open(self.mem_max_file, 'w+') as f:
f.write('0')
except Exception as e:
self._display.warning(
u'Unable to reset max memory value in %s: %s' % (to_text(self.mem_max_file), to_text(e))
)
self.disabled = True
return
try:
with open(cpu_usage_file, 'w+') as f:
f.write('0')
except Exception as e:
self._display.warning(
u'Unable to reset CPU usage value in %s: %s' % (to_text(cpu_usage_file), to_text(e))
)
self.disabled = True
return
self._profiler_map = {
'memory': partial(MemoryProf, mem_current_file, poll_interval=memory_poll_interval),
'cpu': partial(CpuProf, cpu_usage_file, poll_interval=cpu_poll_interval),
'pids': partial(PidsProf, pid_current_file, poll_interval=pid_poll_interval),
}
self.write_files = self.get_option('write_files')
file_per_task = self.get_option('file_per_task')
self._output_format = to_bytes(self.get_option('output_format'))
output_dir = to_bytes(self.get_option('output_dir'), errors='surrogate_or_strict')
try:
output_dir %= to_bytes(datetime.datetime.now().isoformat())
except TypeError:
pass
self._output_dir = output_dir
file_name_format = to_bytes(self.get_option('file_name_format'))
if self.write_files:
if file_per_task:
self._file_per_task = True
if file_name_format == b'%(feature)s.%(ext)s':
file_name_format = b'%(counter)s-%(task_uuid)s-%(feature)s.%(ext)s'
else:
file_name_format = to_bytes(self.get_option('file_name_format'))
self._file_name_format = file_name_format
if not os.path.exists(output_dir):
try:
os.mkdir(output_dir)
except Exception as e:
self._display.warning(
u'Could not create the output directory at %s: %s' % (to_text(output_dir), to_text(e))
)
self.disabled = True
return
if not self._file_per_task:
self._open_files()
def _profile(self, obj=None):
prev_task = None
results = dict.fromkeys(self._features)
if not obj or self._file_per_task:
for dummy, f in self._files.items():
if f is None:
continue
try:
f.close()
except Exception:
pass
try:
for name, prof in self._profilers.items():
prof.running = False
for name, prof in self._profilers.items():
results[name] = prof.max
prev_task = prof.obj
except AttributeError:
pass
for name, result in results.items():
if result is not None:
try:
self.task_results[name].append((prev_task, result))
except ValueError:
pass
if obj is not None:
if self._file_per_task or self._counter == 0:
self._open_files(task_uuid=obj._uuid)
for feature in self._features:
self._profilers[feature] = self._profiler_map[feature](obj=obj, writer=self._writers[feature])
self._profilers[feature].start()
self._counter += 1
def v2_playbook_on_task_start(self, task, is_conditional):
self._profile(task)
def v2_playbook_on_stats(self, stats):
self._profile()
if not self._display_recap:
return
with open(self.mem_max_file) as f:
max_results = int(f.read().strip()) / 1024 / 1024
self._display.banner('CGROUP PERF RECAP')
self._display.display('Memory Execution Maximum: %0.2fMB\n' % max_results)
for name, data in self.task_results.items():
if name == 'memory':
continue
try:
self._display.display(
'%s Execution Maximum: %0.2f%s\n' % (name, max((t[1] for t in data)), self._units[name])
)
except Exception as e:
self._display.display('%s profiling error: no results collected: %s\n' % (name, e))
self._display.display('\n')
for name, data in self.task_results.items():
if data:
self._display.display('%s:\n' % name)
for task, value in data:
self._display.display('%s (%s): %0.2f%s' % (task.get_name(), task._uuid, value, self._units[name]))
self._display.display('\n')


@@ -0,0 +1,53 @@
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: debug
type: stdout
short_description: formatted stdout/stderr display
description:
- Use this callback to sort through extensive debug output
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
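# A minimal sketch of enabling this plugin as the stdout callback via ansible.cfg
# (any other settings in your configuration are unaffected):
#
#   [defaults]
#   stdout_callback = ansible.posix.debug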
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default): # pylint: disable=too-few-public-methods,no-init
'''
Override for the default callback module.
Render stderr/stdout outside of the rest of the result, which it prints with
indentation.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'ansible.posix.debug'
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
'''Return the text to output for a result.'''
# Enable JSON indentation
result['_ansible_verbose_always'] = True
save = {}
for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg', 'module_stdout', 'module_stderr']:
if key in result:
save[key] = result.pop(key)
output = CallbackModule_default._dump_results(self, result)
for key in ['stdout', 'stderr', 'msg', 'module_stdout', 'module_stderr']:
if key in save and save[key]:
output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key])
for key, value in save.items():
result[key] = value
return output


@@ -0,0 +1,197 @@
# (c) 2016, Matt Martz <matt@sivel.net>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: json
short_description: Ansible screen output as JSON
description:
- This callback converts all events into JSON output to stdout
type: stdout
requirements:
- Set as stdout in config
options:
show_custom_stats:
name: Show custom stats
description: 'This adds the custom stats set via the set_stats plugin to the play recap'
default: False
env:
- name: ANSIBLE_SHOW_CUSTOM_STATS
ini:
- key: show_custom_stats
section: defaults
type: bool
json_indent:
name: Use indenting for the JSON output
description: 'If specified, use this many spaces for indenting in the JSON output. If <= 0, write to a single line.'
default: 4
env:
- name: ANSIBLE_JSON_INDENT
ini:
- key: json_indent
section: defaults
type: integer
notes:
- When using a strategy such as free, host_pinned, or a custom strategy, host results will
be added to new task results in ``.plays[].tasks[]``. As such, there will exist duplicate
task objects indicated by duplicate task IDs at ``.plays[].tasks[].task.id``, each with an
individual host result for the task.
'''
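# A minimal configuration sketch for this callback; the indent value of 2 is only an
# example and maps to the json_indent option documented above, and site.yml stands in
# for your own playbook:
#
#   # ansible.cfg
#   [defaults]
#   stdout_callback = ansible.posix.json
#   json_indent = 2
#
#   # or per run, via the documented environment variable:
#   ANSIBLE_JSON_INDENT=2 ansible-playbook site.yml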
import datetime
import json
from functools import partial
from ansible.inventory.host import Host
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
LOCKSTEP_CALLBACKS = frozenset(('linear', 'debug'))
def current_time():
return '%sZ' % datetime.datetime.utcnow().isoformat()
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'ansible.posix.json'
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
self.results = []
self._task_map = {}
self._is_lockstep = False
self.set_options()
self._json_indent = self.get_option('json_indent')
if self._json_indent <= 0:
self._json_indent = None
def _new_play(self, play):
self._is_lockstep = play.strategy in LOCKSTEP_CALLBACKS
return {
'play': {
'name': play.get_name(),
'id': to_text(play._uuid),
'path': to_text(play.get_path()),
'duration': {
'start': current_time()
}
},
'tasks': []
}
def _new_task(self, task):
return {
'task': {
'name': task.get_name(),
'id': to_text(task._uuid),
'path': to_text(task.get_path()),
'duration': {
'start': current_time()
}
},
'hosts': {}
}
def _find_result_task(self, host, task):
key = (host.get_name(), task._uuid)
return self._task_map.get(
key,
self.results[-1]['tasks'][-1]
)
def v2_playbook_on_play_start(self, play):
self.results.append(self._new_play(play))
def v2_runner_on_start(self, host, task):
if self._is_lockstep:
return
key = (host.get_name(), task._uuid)
task_result = self._new_task(task)
self._task_map[key] = task_result
self.results[-1]['tasks'].append(task_result)
def v2_playbook_on_task_start(self, task, is_conditional):
if not self._is_lockstep:
return
self.results[-1]['tasks'].append(self._new_task(task))
def v2_playbook_on_handler_task_start(self, task):
if not self._is_lockstep:
return
self.results[-1]['tasks'].append(self._new_task(task))
def _convert_host_to_name(self, key):
if isinstance(key, (Host,)):
return key.get_name()
return key
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
summary = {}
for h in hosts:
s = stats.summarize(h)
summary[h] = s
custom_stats = {}
global_custom_stats = {}
if self.get_option('show_custom_stats') and stats.custom:
custom_stats.update(dict((self._convert_host_to_name(k), v) for k, v in stats.custom.items()))
global_custom_stats.update(custom_stats.pop('_run', {}))
output = {
'plays': self.results,
'stats': summary,
'custom_stats': custom_stats,
'global_custom_stats': global_custom_stats,
}
self._display.display(json.dumps(output, cls=AnsibleJSONEncoder, indent=self._json_indent, sort_keys=True))
def _record_task_result(self, on_info, result, **kwargs):
"""This function is used as a partial to add failed/skipped info in a single method"""
host = result._host
task = result._task
result_copy = result._result.copy()
result_copy.update(on_info)
result_copy['action'] = task.action
task_result = self._find_result_task(host, task)
task_result['hosts'][host.name] = result_copy
end_time = current_time()
task_result['task']['duration']['end'] = end_time
self.results[-1]['play']['duration']['end'] = end_time
if not self._is_lockstep:
key = (host.get_name(), task._uuid)
del self._task_map[key]
def __getattribute__(self, name):
"""Return ``_record_task_result`` partial with a dict containing skipped/failed if necessary"""
if name not in ('v2_runner_on_ok', 'v2_runner_on_failed', 'v2_runner_on_unreachable', 'v2_runner_on_skipped'):
return object.__getattribute__(self, name)
on = name.rsplit('_', 1)[1]
on_info = {}
if on in ('failed', 'skipped'):
on_info[on] = True
return partial(self._record_task_result, on_info)


@@ -0,0 +1,214 @@
# (c) 2016, Matt Martz <matt@sivel.net>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: jsonl
short_description: Ansible screen output as JSONL (lines in json format)
description:
- This callback converts all events into JSON output to stdout
- In contrast with ansible.posix.json, this callback uses less memory because it doesn't store results.
type: stdout
requirements:
- Set as stdout in config
options:
show_custom_stats:
name: Show custom stats
description: 'This adds the custom stats set via the set_stats plugin to the play recap'
default: False
env:
- name: ANSIBLE_SHOW_CUSTOM_STATS
ini:
- key: show_custom_stats
section: defaults
type: bool
json_indent:
name: Use indenting for the JSON output
description: 'If specified, use this many spaces for indenting in the JSON output. If not specified or <= 0, write to a single line.'
default: 0
env:
- name: ANSIBLE_JSON_INDENT
ini:
- key: json_indent
section: defaults
type: integer
notes:
- When using a strategy such as free, host_pinned, or a custom strategy, host results will
be added to new task results in ``.plays[].tasks[]``. As such, there will exist duplicate
task objects indicated by duplicate task IDs at ``.plays[].tasks[].task.id``, each with an
individual host result for the task.
'''
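# Approximate shape of the output, one JSON object per event; the values below are
# made up for illustration, while the keys follow _write_event() and _new_play() further down:
#
#   {"_event": "v2_playbook_on_play_start", "_timestamp": "2024-02-07T22:21:22.000000Z",
#    "play": {"duration": {"start": "2024-02-07T22:21:22.000000Z"}, "id": "...", "name": "all", "path": "site.yml:1"}, "tasks": []}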
import datetime
import json
import copy
from functools import partial
from ansible.inventory.host import Host
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
LOCKSTEP_CALLBACKS = frozenset(('linear', 'debug'))
def current_time():
return '%sZ' % datetime.datetime.utcnow().isoformat()
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'ansible.posix.jsonl'
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
self.results = []
self._task_map = {}
self._is_lockstep = False
self.set_options()
self._json_indent = self.get_option('json_indent')
if self._json_indent <= 0:
self._json_indent = None
def _new_play(self, play):
self._is_lockstep = play.strategy in LOCKSTEP_CALLBACKS
return {
'play': {
'name': play.get_name(),
'id': to_text(play._uuid),
'path': to_text(play.get_path()),
'duration': {
'start': current_time()
}
},
'tasks': []
}
def _new_task(self, task):
return {
'task': {
'name': task.get_name(),
'id': to_text(task._uuid),
'path': to_text(task.get_path()),
'duration': {
'start': current_time()
}
},
'hosts': {}
}
def _find_result_task(self, host, task):
key = (host.get_name(), task._uuid)
return self._task_map.get(
key,
self.results[-1]['tasks'][-1]
)
def v2_playbook_on_play_start(self, play):
play_result = self._new_play(play)
self.results.append(play_result)
self._write_event('v2_playbook_on_play_start', play_result)
def v2_runner_on_start(self, host, task):
if self._is_lockstep:
return
key = (host.get_name(), task._uuid)
task_result = self._new_task(task)
self._task_map[key] = task_result
self.results[-1]['tasks'].append(task_result)
self._write_event('v2_runner_on_start', task_result)
def v2_playbook_on_task_start(self, task, is_conditional):
if not self._is_lockstep:
return
task_result = self._new_task(task)
self.results[-1]['tasks'].append(task_result)
self._write_event('v2_playbook_on_task_start', task_result)
def v2_playbook_on_handler_task_start(self, task):
if not self._is_lockstep:
return
task_result = self._new_task(task)
self.results[-1]['tasks'].append(task_result)
self._write_event('v2_playbook_on_handler_task_start', task_result)
def _convert_host_to_name(self, key):
if isinstance(key, (Host,)):
return key.get_name()
return key
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
summary = {}
for h in hosts:
s = stats.summarize(h)
summary[h] = s
custom_stats = {}
global_custom_stats = {}
if self.get_option('show_custom_stats') and stats.custom:
custom_stats.update(dict((self._convert_host_to_name(k), v) for k, v in stats.custom.items()))
global_custom_stats.update(custom_stats.pop('_run', {}))
output = {
'stats': summary,
'custom_stats': custom_stats,
'global_custom_stats': global_custom_stats,
}
self._write_event('v2_playbook_on_stats', output)
def _write_event(self, event_name, output):
output['_event'] = event_name
output['_timestamp'] = current_time()
self._display.display(json.dumps(output, cls=AnsibleJSONEncoder, indent=self._json_indent, separators=',:', sort_keys=True))
def _record_task_result(self, event_name, on_info, result, **kwargs):
"""This function is used as a partial to add failed/skipped info in a single method"""
host = result._host
task = result._task
result_copy = result._result.copy()
result_copy.update(on_info)
result_copy['action'] = task.action
task_result = self._find_result_task(host, task)
end_time = current_time()
task_result['task']['duration']['end'] = end_time
self.results[-1]['play']['duration']['end'] = end_time
task_result_copy = copy.deepcopy(task_result)
task_result_copy['hosts'][host.name] = result_copy
if not self._is_lockstep:
key = (host.get_name(), task._uuid)
del self._task_map[key]
self._write_event(event_name, task_result_copy)
def __getattribute__(self, name):
"""Return ``_record_task_result`` partial with a dict containing skipped/failed if necessary"""
if name not in ('v2_runner_on_ok', 'v2_runner_on_failed', 'v2_runner_on_unreachable', 'v2_runner_on_skipped'):
return object.__getattribute__(self, name)
on = name.rsplit('_', 1)[1]
on_info = {}
if on in ('failed', 'skipped'):
on_info[on] = True
return partial(self._record_task_result, name, on_info)


@@ -0,0 +1,118 @@
# (c) 2017, Tennis Smith, https://github.com/gamename
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: profile_roles
type: aggregate
short_description: adds timing information to roles
description:
- This callback module provides profiling for ansible roles.
requirements:
- whitelisting in configuration
'''
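# A minimal enablement sketch, following the same ansible.cfg convention that the
# profile_tasks plugin documents in its EXAMPLES section:
#
#   [defaults]
#   callbacks_enabled = ansible.posix.profile_roles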
import collections
import time
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.six.moves import reduce
# define start time
t0 = tn = time.time()
def secondsToStr(t):
# http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
def rediv(ll, b):
return list(divmod(ll[0], b)) + ll[1:]
return "%d:%02d:%02d.%03d" % tuple(
reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
def filled(msg, fchar="*"):
if len(msg) == 0:
width = 79
else:
msg = "%s " % msg
width = 79 - len(msg)
if width < 3:
width = 3
filler = fchar * width
return "%s%s " % (msg, filler)
def timestamp(self):
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
self.totals[self.current] += self.stats[self.current]
def tasktime():
global tn
time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
time_elapsed = secondsToStr(time.time() - tn)
time_total_elapsed = secondsToStr(time.time() - t0)
tn = time.time()
return filled('%s (%s)%s%s' %
(time_current, time_elapsed, ' ' * 7, time_total_elapsed))
class CallbackModule(CallbackBase):
"""
This callback module provides profiling for ansible roles.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'ansible.posix.profile_roles'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
self.stats = collections.Counter()
self.totals = collections.Counter()
self.current = None
super(CallbackModule, self).__init__()
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
timestamp(self)
if task._role:
self.current = task._role._role_name
else:
self.current = task.action
self.stats[self.current] = time.time()
def v2_playbook_on_task_start(self, task, is_conditional):
self._record_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display.display(filled("", fchar="="))
timestamp(self)
total_time = sum(self.totals.values())
# Print the timings starting with the largest one
for result in self.totals.most_common():
msg = u"{0:-<70}{1:->9}".format(result[0] + u' ', u' {0:.02f}s'.format(result[1]))
self._display.display(msg)
msg_total = u"{0:-<70}{1:->9}".format(u'total ', u' {0:.02f}s'.format(total_time))
self._display.display(filled("", fchar="~"))
self._display.display(msg_total)


@@ -0,0 +1,201 @@
# (C) 2016, Joel, https://github.com/jjshoe
# (C) 2015, Tom Paine, <github@aioue.net>
# (C) 2014, Jharrod LaFon, @JharrodLaFon
# (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: profile_tasks
type: aggregate
short_description: adds time information to tasks
description:
- Ansible callback plugin for timing individual tasks and overall execution time.
- "Mashup of 2 excellent original works: https://github.com/jlafon/ansible-profile,
https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old"
- "Format: C(<task start timestamp>) C(<length of previous task>) C(<current elapsed playbook execution time>)"
- It also lists the top/bottom time consuming tasks in the summary (configurable)
- Before 2.4 only the environment variables were available for configuration.
requirements:
- enable in configuration - see examples section below for details.
options:
output_limit:
description: Number of tasks to display in the summary
default: 20
env:
- name: PROFILE_TASKS_TASK_OUTPUT_LIMIT
ini:
- section: callback_profile_tasks
key: task_output_limit
sort_order:
description: Adjust the sorting output of summary tasks
choices: ['descending', 'ascending', 'none']
default: 'descending'
env:
- name: PROFILE_TASKS_SORT_ORDER
ini:
- section: callback_profile_tasks
key: sort_order
'''
EXAMPLES = '''
example: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callbacks_enabled=ansible.posix.profile_tasks
sample output: >
#
# TASK: [ensure messaging security group exists] ********************************
# Thursday 11 June 2017 22:50:53 +0100 (0:00:00.721) 0:00:05.322 *********
# ok: [localhost]
#
# TASK: [ensure db security group exists] ***************************************
# Thursday 11 June 2017 22:50:54 +0100 (0:00:00.558) 0:00:05.880 *********
# changed: [localhost]
#
'''
import collections
import time
from ansible.module_utils.six.moves import reduce
from ansible.plugins.callback import CallbackBase
# define start time
t0 = tn = time.time()
def secondsToStr(t):
# http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
def rediv(ll, b):
return list(divmod(ll[0], b)) + ll[1:]
return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
def filled(msg, fchar="*"):
if len(msg) == 0:
width = 79
else:
msg = "%s " % msg
width = 79 - len(msg)
if width < 3:
width = 3
filler = fchar * width
return "%s%s " % (msg, filler)
def timestamp(self):
if self.current is not None:
elapsed = time.time() - self.stats[self.current]['started']
self.stats[self.current]['elapsed'] += elapsed
def tasktime():
global tn
time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
time_elapsed = secondsToStr(time.time() - tn)
time_total_elapsed = secondsToStr(time.time() - t0)
tn = time.time()
return filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed))
class CallbackModule(CallbackBase):
"""
This callback module provides per-task timing, ongoing playbook elapsed time
and ordered list of top 20 longest running tasks at end.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'ansible.posix.profile_tasks'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
self.stats = collections.OrderedDict()
self.current = None
self.sort_order = None
self.task_output_limit = None
super(CallbackModule, self).__init__()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.sort_order = self.get_option('sort_order')
if self.sort_order is not None:
if self.sort_order == 'ascending':
self.sort_order = False
elif self.sort_order == 'descending':
self.sort_order = True
elif self.sort_order == 'none':
self.sort_order = None
self.task_output_limit = self.get_option('output_limit')
if self.task_output_limit is not None:
if self.task_output_limit == 'all':
self.task_output_limit = None
else:
self.task_output_limit = int(self.task_output_limit)
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
timestamp(self)
# Record the start time of the current task
# stats[TASK_UUID]:
# started: Current task start time. This value will be updated each time a task
# with the same UUID is executed when `serial` is specified in a playbook.
# elapsed: Elapsed time since the first serialized task was started
self.current = task._uuid
if self.current not in self.stats:
self.stats[self.current] = {'started': time.time(), 'elapsed': 0.0, 'name': task.get_name()}
else:
self.stats[self.current]['started'] = time.time()
if self._display.verbosity >= 2:
self.stats[self.current]['path'] = task.get_path()
def v2_playbook_on_task_start(self, task, is_conditional):
self._record_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display.display(filled("", fchar="="))
timestamp(self)
self.current = None
results = list(self.stats.items())
# Sort the tasks by the specified sort
if self.sort_order is not None:
results = sorted(
self.stats.items(),
key=lambda x: x[1]['elapsed'],
reverse=self.sort_order,
)
# Display the number of tasks specified or the default of 20
results = list(results)[:self.task_output_limit]
# Print the timings
for uuid, result in results:
msg = u"{0:-<{2}}{1:->9}".format(result['name'] + u' ', u' {0:.02f}s'.format(result['elapsed']), self._display.columns - 9)
if 'path' in result:
msg += u"\n{0:-<{1}}".format(result['path'] + u' ', self._display.columns)
self._display.display(msg)


@@ -0,0 +1,43 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: skippy
type: stdout
requirements:
- set as main display callback
short_description: Ansible screen output that ignores skipped status
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_at_date: '2022-06-01'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
extends_documentation_fragment:
- default_callback
description:
- This callback does the same as the default except it does not output skipped host/task/item status
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'ansible.posix.skippy'
def v2_runner_on_skipped(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass


@@ -0,0 +1,49 @@
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: timer
type: aggregate
requirements:
- whitelist in configuration
short_description: Adds time to play stats
description:
- This callback just adds total play duration to the play stats.
'''
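# A minimal enablement sketch via ansible.cfg; once whitelisted, the plugin prints a
# line such as "Playbook run took 0 days, 0 hours, 1 minutes, 2 seconds" after the
# play recap:
#
#   [defaults]
#   callbacks_enabled = ansible.posix.timer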
from datetime import datetime
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
This callback module tells you how long your plays ran for.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'ansible.posix.timer'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self.start_time = datetime.utcnow()
def days_hours_minutes_seconds(self, runtime):
minutes = (runtime.seconds // 60) % 60
r_seconds = runtime.seconds % 60
return runtime.days, runtime.seconds // 3600, minutes, r_seconds
def playbook_on_stats(self, stats):
self.v2_playbook_on_stats(stats)
def v2_playbook_on_stats(self, stats):
end_time = datetime.utcnow()
runtime = end_time - self.start_time
self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime)))